repo_name (string, length 5-100) | path (string, length 4-375) | copies (categorical, 991 values) | size (string, length 4-7) | content (string, length 666-1M) | license (categorical, 15 values)
---|---|---|---|---|---|
Julian/urwid | examples/input_test.py | 11 | 3001 | #!/usr/bin/python
#
# Urwid keyboard input test app
# Copyright (C) 2004-2009 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Keyboard test application
"""
import urwid.curses_display
import urwid.raw_display
import urwid.web_display
import urwid
import sys
if urwid.web_display.is_web_request():
Screen = urwid.web_display.Screen
else:
if len(sys.argv)>1 and sys.argv[1][:1] == "r":
Screen = urwid.raw_display.Screen
else:
Screen = urwid.curses_display.Screen
def key_test():
screen = Screen()
header = urwid.Text("Values from get_input(). Q exits.")
header = urwid.AttrWrap(header,'header')
lw = urwid.SimpleListWalker([])
listbox = urwid.ListBox(lw)
listbox = urwid.AttrWrap(listbox, 'listbox')
top = urwid.Frame(listbox, header)
def input_filter(keys, raw):
if 'q' in keys or 'Q' in keys:
raise urwid.ExitMainLoop
t = []
a = []
for k in keys:
if type(k) == tuple:
out = []
for v in k:
if out:
out += [', ']
out += [('key',repr(v))]
t += ["("] + out + [")"]
else:
t += ["'",('key',k),"' "]
rawt = urwid.Text(", ".join(["%d"%r for r in raw]))
if t:
lw.append(
urwid.Columns([
('weight',2,urwid.Text(t)),
rawt])
)
listbox.set_focus(len(lw)-1,'above')
return keys
loop = urwid.MainLoop(top, [
('header', 'black', 'dark cyan', 'standout'),
('key', 'yellow', 'dark blue', 'bold'),
('listbox', 'light gray', 'black' ),
], screen, input_filter=input_filter)
try:
old = screen.tty_signal_keys('undefined','undefined',
'undefined','undefined','undefined')
loop.run()
finally:
screen.tty_signal_keys(*old)
def main():
urwid.web_display.set_preferences('Input Test')
if urwid.web_display.handle_short_request():
return
key_test()
if '__main__'==__name__ or urwid.web_display.is_web_request():
main()
| lgpl-2.1 |
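The file above is already a complete demo; the fragment below is only a minimal, hypothetical distillation of its input_filter pattern (the widget and function names are illustrative, not from the original file):

import urwid

def quit_filter(keys, raw):
    # Same convention as input_test.py: 'q' or 'Q' ends the loop.
    if 'q' in keys or 'Q' in keys:
        raise urwid.ExitMainLoop
    return keys

# MainLoop accepts an input_filter callable that sees every batch of keys
# (and the raw byte codes) before normal widget processing.
fill = urwid.Filler(urwid.Text("Press q to quit."))
urwid.MainLoop(fill, input_filter=quit_filter).run()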
vasyarv/edx-platform | lms/djangoapps/instructor_task/tasks.py | 12 | 12056 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
At present, these tasks all operate on StudentModule objects in one way or another,
so they share a visitor architecture. Each task defines an "update function" that
takes a module_descriptor, a particular StudentModule object, and xmodule_instance_args.
A task may optionally specify a "filter function" that takes a query for StudentModule
objects, and adds additional filter clauses.
A task also passes through "xmodule_instance_args", which is used to provide
information to our code that instantiates xmodule instances.
The task definition then calls the traversal function, passing in the three arguments
above, along with the id value for an InstructorTask object. The InstructorTask
object contains a 'task_input' row which is a JSON-encoded dict containing
a problem URL and optionally a student. These are used to set up the initial value
of the query for traversing StudentModule objects.
"""
import logging
from functools import partial
from django.conf import settings
from django.utils.translation import ugettext_noop
from celery import task
from bulk_email.tasks import perform_delegate_email_batches
from instructor_task.tasks_helper import (
run_main_task,
BaseInstructorTask,
perform_module_state_update,
rescore_problem_module_state,
reset_attempts_module_state,
delete_problem_module_state,
upload_grades_csv,
upload_problem_grade_report,
upload_students_csv,
cohort_students_and_upload,
upload_enrollment_report,
upload_may_enroll_csv,
upload_exec_summary_report,
generate_students_certificates,
upload_proctored_exam_results_report
)
TASK_LOG = logging.getLogger('edx.celery.task')
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def rescore_problem(entry_id, xmodule_instance_args):
"""Rescores a problem in a course, for all students or one specific student.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem to be rescored. (required)
'student': the identifier (username or email) of a particular user whose
problem submission should be rescored. If not specified, all problem
submissions for the problem will be rescored.
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('rescored')
update_fcn = partial(rescore_problem_module_state, xmodule_instance_args)
def filter_fcn(modules_to_update):
"""Filter that matches problems which are marked as being done"""
return modules_to_update.filter(state__contains='"done": true')
visit_fcn = partial(perform_module_state_update, update_fcn, filter_fcn)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def reset_problem_attempts(entry_id, xmodule_instance_args):
"""Resets problem attempts to zero for a particular problem for all students in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem whose attempts should be reset. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('reset')
update_fcn = partial(reset_attempts_module_state, xmodule_instance_args)
visit_fcn = partial(perform_module_state_update, update_fcn, None)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def delete_problem_state(entry_id, xmodule_instance_args):
"""Deletes problem state entirely for all students on a particular problem in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'problem_url': the full URL to the problem whose state should be deleted. (required)
`xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('deleted')
update_fcn = partial(delete_problem_module_state, xmodule_instance_args)
visit_fcn = partial(perform_module_state_update, update_fcn, None)
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=not-callable
def send_bulk_course_email(entry_id, _xmodule_instance_args):
"""Sends emails to recipients enrolled in a course.
`entry_id` is the id value of the InstructorTask entry that corresponds to this task.
The entry contains the `course_id` that identifies the course, as well as the
`task_input`, which contains task-specific input.
The task_input should be a dict with the following entries:
'email_id': the id of the email to be sent. (required)
`_xmodule_instance_args` provides information needed by _get_module_instance_for_task()
to instantiate an xmodule instance. This is unused here.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('emailed')
visit_fcn = perform_delegate_email_batches
return run_main_task(entry_id, visit_fcn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_grades_csv(entry_id, xmodule_instance_args):
"""
Grade a course and push the results to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(upload_grades_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_problem_grade_report(entry_id, xmodule_instance_args):
"""
Generate a CSV for a course containing all students' problem
grades and push the results to an S3 bucket for download.
"""
# Translators: This is a past-tense phrase that is inserted into task progress messages as {action}.
action_name = ugettext_noop('problem distribution graded')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(upload_problem_grade_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_students_features_csv(entry_id, xmodule_instance_args):
"""
Compute student profile information for a course and upload the
CSV to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generated')
task_fn = partial(upload_students_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def enrollment_report_features_csv(entry_id, xmodule_instance_args):
"""
Compute student profile information for a course and upload the
CSV to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generating_enrollment_report')
task_fn = partial(upload_enrollment_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def exec_summary_report_csv(entry_id, xmodule_instance_args):
"""
Compute the executive summary report for a course and upload the
generated HTML report to an S3 bucket for download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = 'generating_exec_summary_report'
task_fn = partial(upload_exec_summary_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def proctored_exam_results_csv(entry_id, xmodule_instance_args):
"""
Compute proctored exam results report for a course and upload the
CSV for download.
"""
action_name = 'generating_proctored_exam_results_report'
task_fn = partial(upload_proctored_exam_results_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def calculate_may_enroll_csv(entry_id, xmodule_instance_args):
"""
Compute information about invited students who have not enrolled
in a given course yet and upload the CSV to an S3 bucket for
download.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('generated')
task_fn = partial(upload_may_enroll_csv, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask, routing_key=settings.GRADES_DOWNLOAD_ROUTING_KEY) # pylint: disable=not-callable
def generate_certificates(entry_id, xmodule_instance_args):
"""
Grade students and generate certificates.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
action_name = ugettext_noop('certificates generated')
TASK_LOG.info(
u"Task: %s, InstructorTask ID: %s, Task type: %s, Preparing for task execution",
xmodule_instance_args.get('task_id'), entry_id, action_name
)
task_fn = partial(generate_students_certificates, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
@task(base=BaseInstructorTask) # pylint: disable=E1102
def cohort_students(entry_id, xmodule_instance_args):
"""
Cohort students in bulk, and upload the results.
"""
# Translators: This is a past-tense verb that is inserted into task progress messages as {action}.
# An example of such a message is: "Progress: {action} {succeeded} of {attempted} so far"
action_name = ugettext_noop('cohorted')
task_fn = partial(cohort_students_and_upload, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
| agpl-3.0 |
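As a reading aid for the module docstring above, a sketch of the pieces it names follows; the dict keys come from the docstring, but the URL format, argument order, and function bodies are assumptions rather than edx-platform code:

# Hypothetical JSON-decoded task_input stored on an InstructorTask row.
task_input = {
    'problem_url': 'i4x://Org/Course/problem/Example',  # assumed URL format
    'student': 'some_username',                          # optional
}

# An "update function" receives xmodule_instance_args, a module descriptor and one
# StudentModule; partial() binds xmodule_instance_args, as in rescore_problem above.
def example_update_fcn(xmodule_instance_args, module_descriptor, student_module):
    pass  # illustrative only

# A "filter function" adds clauses to the StudentModule query, as in rescore_problem.
def example_filter_fcn(modules_to_update):
    return modules_to_update.filter(state__contains='"done": true')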
MaximLich/oppia | core/tests/performance_tests/splash_test.py | 16 | 1597 | # Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance tests for the splash page."""
from core.tests.performance_tests import base
from core.tests.performance_tests import test_config
class SplashPagePerformanceTest(base.TestBase):
"""Performance tests for the splash page."""
PAGE_KEY = test_config.PAGE_KEY_SPLASH
def setUp(self):
super(SplashPagePerformanceTest, self).setUp()
page_config = test_config.TEST_DATA[self.PAGE_KEY]
self._set_page_config(page_config)
self._initialize_data_fetcher()
self._load_page_to_cache_server_resources()
def test_page_size_under_specified_limit(self):
self._test_total_page_size()
def test_page_size_under_specified_limit_for_cached_session(self):
self._test_total_page_size_for_cached_session()
def test_page_loads_under_specified_limit(self):
self._test_page_load_time()
def test_page_loads_under_specified_limit_cached_session(self):
self._test_page_load_time_for_cached_session()
| apache-2.0 |
hmgaudecker/econ-project-templates | {{cookiecutter.project_slug}}/.mywaflib/waflib/Tools/cs.py | 55 | 6397 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
"""
C# support. A simple example::
def configure(conf):
conf.load('cs')
def build(bld):
bld(features='cs', source='main.cs', gen='foo')
Note that the configuration may compile C# snippets::
FRAG = '''
namespace Moo {
public class Test { public static int Main(string[] args) { return 0; } }
}'''
def configure(conf):
conf.check(features='cs', fragment=FRAG, compile_filename='test.cs', gen='test.exe',
bintype='exe', csflags=['-pkg:gtk-sharp-2.0'], msg='Checking for Gtksharp support')
"""
from waflib import Utils, Task, Options, Errors
from waflib.TaskGen import before_method, after_method, feature
from waflib.Tools import ccroot
from waflib.Configure import conf
ccroot.USELIB_VARS['cs'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES'])
ccroot.lib_patterns['csshlib'] = ['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
"""
Create a C# task bound to the attribute *cs_task*. There can be only one C# task by task generator.
"""
cs_nodes = []
no_nodes = []
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source = no_nodes
bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe')
self.cs_task = tsk = self.create_task('mcs', cs_nodes, self.path.find_or_declare(self.gen))
tsk.env.CSTYPE = '/target:%s' % bintype
tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath()
self.env.append_value('CSFLAGS', '/platform:%s' % getattr(self, 'platform', 'anycpu'))
inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}')
if inst_to:
# note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically
mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644)
self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
"""
C# applications honor the **use** keyword::
def build(bld):
bld(features='cs', source='My.cs', bintype='library', gen='my.dll', name='mylib')
bld(features='cs', source='Hi.cs', includes='.', bintype='exe', gen='hi.exe', use='mylib', name='hi')
"""
names = self.to_list(getattr(self, 'use', []))
get = self.bld.get_tgen_by_name
for x in names:
try:
y = get(x)
except Errors.WafError:
self.env.append_value('CSFLAGS', '/reference:%s' % x)
continue
y.post()
tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r' % self)
self.cs_task.dep_nodes.extend(tsk.outputs) # dependency
self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs)
self.env.append_value('CSFLAGS', '/reference:%s' % tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs', 'use_cs')
def debug_cs(self):
"""
The C# targets may create .mdb or .pdb files::
def build(bld):
bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdebug='full')
# csdebug is a value in (True, 'full', 'pdbonly')
"""
csdebug = getattr(self, 'csdebug', self.env.CSDEBUG)
if not csdebug:
return
node = self.cs_task.outputs[0]
if self.env.CS_NAME == 'mono':
out = node.parent.find_or_declare(node.name + '.mdb')
else:
out = node.change_ext('.pdb')
self.cs_task.outputs.append(out)
if getattr(self, 'install_task', None):
self.pdb_install_task = self.add_install_files(
install_to=self.install_task.install_to, install_from=out)
if csdebug == 'pdbonly':
val = ['/debug+', '/debug:pdbonly']
elif csdebug == 'full':
val = ['/debug+', '/debug:full']
else:
val = ['/debug-']
self.env.append_value('CSFLAGS', val)
@feature('cs')
@after_method('debug_cs')
def doc_cs(self):
"""
The C# targets may create .xml documentation files::
def build(bld):
bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdoc=True)
# csdoc is a boolean value
"""
csdoc = getattr(self, 'csdoc', self.env.CSDOC)
if not csdoc:
return
node = self.cs_task.outputs[0]
out = node.change_ext('.xml')
self.cs_task.outputs.append(out)
if getattr(self, 'install_task', None):
self.doc_install_task = self.add_install_files(
install_to=self.install_task.install_to, install_from=out)
self.env.append_value('CSFLAGS', '/doc:%s' % out.abspath())
class mcs(Task.Task):
"""
Compile C# files
"""
color = 'YELLOW'
run_str = '${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def split_argfile(self, cmd):
inline = [cmd[0]]
infile = []
for x in cmd[1:]:
# csc doesn't want /noconfig in @file
if x.lower() == '/noconfig':
inline.append(x)
else:
infile.append(self.quote_flag(x))
return (inline, infile)
def configure(conf):
"""
Find a C# compiler, set the variable MCS for the compiler and CS_NAME (mono or csc)
"""
csc = getattr(Options.options, 'cscbinary', None)
if csc:
conf.env.MCS = csc
conf.find_program(['csc', 'mcs', 'gmcs'], var='MCS')
conf.env.ASS_ST = '/r:%s'
conf.env.RES_ST = '/resource:%s'
conf.env.CS_NAME = 'csc'
if str(conf.env.MCS).lower().find('mcs') > -1:
conf.env.CS_NAME = 'mono'
def options(opt):
"""
Add a command-line option for the configuration::
$ waf configure --with-csc-binary=/foo/bar/mcs
"""
opt.add_option('--with-csc-binary', type='string', dest='cscbinary')
class fake_csshlib(Task.Task):
"""
Task used for reading a foreign .net assembly and adding the dependency on it
"""
color = 'YELLOW'
inst_to = None
def runnable_status(self):
return Task.SKIP_ME
@conf
def read_csshlib(self, name, paths=[]):
"""
Read a foreign .net assembly for the *use* system::
def build(bld):
bld.read_csshlib('ManagedLibrary.dll', paths=[bld.env.mylibrarypath])
bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe', use='ManagedLibrary.dll')
:param name: Name of the library
:type name: string
:param paths: Folders in which the library may be found
:type paths: list of string
:return: A task generator having the feature *fake_lib* which will call :py:func:`waflib.Tools.ccroot.process_lib`
:rtype: :py:class:`waflib.TaskGen.task_gen`
"""
return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib')
| bsd-3-clause |
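Collecting the wscript fragments scattered through the docstrings above into one hypothetical build script (file names and target names are illustrative):

#!/usr/bin/env python
# Hypothetical wscript exercising the cs tool documented above.

def options(opt):
    opt.load('cs')            # registers --with-csc-binary

def configure(conf):
    conf.load('cs')           # finds csc/mcs/gmcs and sets MCS / CS_NAME

def build(bld):
    # A library and an executable that consumes it via the "use" keyword,
    # with debug symbols and XML documentation enabled (see debug_cs / doc_cs).
    bld(features='cs', source='My.cs', bintype='library', gen='my.dll',
        name='mylib', csdebug='full', csdoc=True)
    bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe',
        use='mylib', name='hi')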
dhruve/spark | examples/src/main/python/ml/string_indexer_example.py | 123 | 1402 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import StringIndexer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("StringIndexerExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame(
[(0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c")],
["id", "category"])
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
indexed = indexer.fit(df).transform(df)
indexed.show()
# $example off$
spark.stop()
| apache-2.0 |
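StringIndexer assigns indices by descending label frequency, so for the toy DataFrame above the expected output is approximately the table sketched below (tie-breaking can differ across Spark versions):

# id | category | categoryIndex
#  0 | a        | 0.0   <- "a" occurs 3 times, most frequent
#  1 | b        | 2.0   <- "b" occurs once, least frequent
#  2 | c        | 1.0
#  3 | a        | 0.0
#  4 | a        | 0.0
#  5 | c        | 1.0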
piyushroshan/xen-4.3 | tools/python/logging/logging-0.4.9.2/logging/handlers.py | 42 | 28606 | # Copyright 2001-2004 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Should work under Python versions >= 1.5.2, except that source line
information is not available unless 'sys._getframe()' is.
Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, socket, types, os, string, cPickle, struct, time
from SocketServer import ThreadingTCPServer, StreamRequestHandler
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
class RotatingFileHandler(logging.FileHandler):
def __init__(self, filename, mode="a", maxBytes=0, backupCount=0):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
logging.FileHandler.__init__(self, filename, mode)
self.maxBytes = maxBytes
self.backupCount = backupCount
if maxBytes > 0:
self.mode = "a"
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
self.stream.close()
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d" % (self.baseFilename, i)
dfn = "%s.%d" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
#print "%s -> %s" % (sfn, dfn)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1"
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
#print "%s -> %s" % (self.baseFilename, dfn)
self.stream = open(self.baseFilename, "w")
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
self.doRollover()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
The attribute 'closeOnError' is set to 1 - which means that if
a socket error occurs, the socket is silently closed and then
reopened on the next logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = 0
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
return s
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = 1
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else:
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error:
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
dummy = self.format(record) # just to get traceback text into record.exc_text
record.exc_info = None # to avoid Unpickleable error
s = cPickle.dumps(record.__dict__, 1)
if ei:
record.exc_info = ei # for next handler
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = 0
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
"""
Initialize a handler.
If address is specified as a string, UNIX socket is used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
if type(address) == types.StringType:
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# syslog may require either DGRAM or STREAM sockets
try:
self.socket.connect(address)
except socket.error:
self.socket.close()
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(address)
self.unixsocket = 1
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.unixsocket = 0
self.formatter = None
# curious: when talking to the unix-domain '/dev/log' socket, a
# zero-terminator seems to be required. this string is placed
# into a class variable so that it can be overridden if
# necessary.
log_format_string = '<%d>%s\000'
def encodePriority (self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if type(facility) == types.StringType:
facility = self.facility_names[facility]
if type(priority) == types.StringType:
priority = self.priority_names[priority]
return (facility << 3) | priority
def close (self):
"""
Closes the socket.
"""
if self.unixsocket:
self.socket.close()
logging.Handler.close(self)
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record)
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
msg = self.log_format_string % (
self.encodePriority(self.facility,
string.lower(record.levelname)),
msg)
try:
if self.unixsocket:
self.socket.send(msg)
else:
self.socket.sendto(msg, self.address)
except:
self.handleError(record)
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument.
"""
logging.Handler.__init__(self)
if type(mailhost) == types.TupleType:
host, port = mailhost
self.mailhost = host
self.mailport = port
else:
self.mailhost = mailhost
self.mailport = None
self.fromaddr = fromaddr
if type(toaddrs) == types.StringType:
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def date_time(self):
"""Return the current date and time formatted for a MIME header."""
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
string.join(self.toaddrs, ","),
self.getSubject(record),
self.date_time(), msg)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except:
self.handleError(record)
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print "The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available."
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET"):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = string.upper(method)
if method not in ["GET", "POST"]:
raise ValueError, "method must be GET or POST"
self.host = host
self.url = url
self.method = method
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as an URL-encoded dictionary
"""
try:
import httplib, urllib
h = httplib.HTTP(self.host)
url = self.url
data = urllib.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (string.find(url, '?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
if self.method == "POST":
h.putheader("Content-length", str(len(data)))
h.endheaders()
if self.method == "POST":
h.send(data)
h.getreply() #can't do anything with the result
except:
self.handleError(record)
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.buffer = []
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
"""
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.target = None
BufferingHandler.close(self)
| gpl-2.0 |
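The SocketHandler above writes a 4-byte big-endian length prefix followed by the pickled LogRecord attribute dict; a minimal receiving end, written in the same Python 2 style as the module (the class name and port choice are assumptions), could look like this:

import cPickle, logging, struct
from SocketServer import ThreadingTCPServer, StreamRequestHandler

class LogRecordStreamHandler(StreamRequestHandler):
    def handle(self):
        while 1:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack(">L", chunk)[0]
            chunk = self.connection.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + self.connection.recv(slen - len(chunk))
            # Rebuild a LogRecord from the unpickled __dict__ and re-handle it locally.
            record = logging.makeLogRecord(cPickle.loads(chunk))
            logging.getLogger(record.name).handle(record)

server = ThreadingTCPServer(('localhost', 9020), LogRecordStreamHandler)  # 9020 = DEFAULT_TCP_LOGGING_PORT
server.serve_forever()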
raags/ansible-modules-core | windows/win_ping.py | 208 | 1376 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_ping
version_added: "1.7"
short_description: A windows version of the classic ping module.
description:
- Checks management connectivity of a windows host
options:
data:
description:
- Alternate data to return instead of 'pong'
required: false
default: 'pong'
aliases: []
author: "Chris Church (@cchurch)"
'''
EXAMPLES = '''
# Test connectivity to a windows host
ansible winserver -m win_ping
# Example from an Ansible Playbook
- action: win_ping
'''
| gpl-3.0 |
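Two more usage sketches for the data option documented above (host pattern and string are illustrative):

# Ad-hoc, returning a custom string instead of 'pong'
ansible winserver -m win_ping -a "data=hello"

# Playbook form, matching the 1.7-era action syntax used above
- action: win_ping data=hello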
lurch/python-gpiozero | gpiozero/pins/pigpiod.py | 1 | 9761 | from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import warnings
import pigpio
import os
from . import Pin
from .data import pi_info
from ..exc import (
PinInvalidFunction,
PinSetInput,
PinFixedPull,
PinInvalidPull,
PinInvalidBounce,
PinInvalidState,
PinNonPhysical,
PinNoPins,
)
class PiGPIOPin(Pin):
"""
Uses the `pigpio`_ library to interface to the Pi's GPIO pins. The pigpio
library relies on a daemon (``pigpiod``) to be running as root to provide
access to the GPIO pins, and communicates with this daemon over a network
socket.
While this does mean only the daemon itself should control the pins, the
architecture does have several advantages:
* Pins can be remote controlled from another machine (the other
machine doesn't even have to be a Raspberry Pi; it simply needs the
`pigpio`_ client library installed on it)
* The daemon supports hardware PWM via the DMA controller
* Your script itself doesn't require root privileges; it just needs to
be able to communicate with the daemon
You can construct pigpiod pins manually like so::
from gpiozero.pins.pigpiod import PiGPIOPin
from gpiozero import LED
led = LED(PiGPIOPin(12))
This is particularly useful for controlling pins on a remote machine. To
accomplish this simply specify the host (and optionally port) when
constructing the pin::
from gpiozero.pins.pigpiod import PiGPIOPin
from gpiozero import LED
from signal import pause
led = LED(PiGPIOPin(12, host='192.168.0.2'))
.. note::
In some circumstances, especially when playing with PWM, it does appear
to be possible to get the daemon into "unusual" states. We would be
most interested to hear any bug reports relating to this (it may be a
bug in our pin implementation). A workaround for now is simply to
restart the ``pigpiod`` daemon.
.. _pigpio: http://abyz.co.uk/rpi/pigpio/
"""
_CONNECTIONS = {} # maps (host, port) to (connection, pi_info)
_PINS = {}
GPIO_FUNCTIONS = {
'input': pigpio.INPUT,
'output': pigpio.OUTPUT,
'alt0': pigpio.ALT0,
'alt1': pigpio.ALT1,
'alt2': pigpio.ALT2,
'alt3': pigpio.ALT3,
'alt4': pigpio.ALT4,
'alt5': pigpio.ALT5,
}
GPIO_PULL_UPS = {
'up': pigpio.PUD_UP,
'down': pigpio.PUD_DOWN,
'floating': pigpio.PUD_OFF,
}
GPIO_EDGES = {
'both': pigpio.EITHER_EDGE,
'rising': pigpio.RISING_EDGE,
'falling': pigpio.FALLING_EDGE,
}
GPIO_FUNCTION_NAMES = {v: k for (k, v) in GPIO_FUNCTIONS.items()}
GPIO_PULL_UP_NAMES = {v: k for (k, v) in GPIO_PULL_UPS.items()}
GPIO_EDGES_NAMES = {v: k for (k, v) in GPIO_EDGES.items()}
def __new__(
cls, number, host=os.getenv('PIGPIO_ADDR', 'localhost'),
port=int(os.getenv('PIGPIO_PORT', 8888))):
try:
return cls._PINS[(host, port, number)]
except KeyError:
self = super(PiGPIOPin, cls).__new__(cls)
cls.pi_info(host, port) # implicitly creates connection
self._connection, self._pi_info = cls._CONNECTIONS[(host, port)]
try:
self._pi_info.physical_pin('GPIO%d' % number)
except PinNoPins:
warnings.warn(
PinNonPhysical(
'no physical pins exist for GPIO%d' % number))
self._host = host
self._port = port
self._number = number
self._pull = 'up' if self._pi_info.pulled_up('GPIO%d' % number) else 'floating'
self._pwm = False
self._bounce = None
self._when_changed = None
self._callback = None
self._edges = pigpio.EITHER_EDGE
try:
self._connection.set_mode(self._number, pigpio.INPUT)
except pigpio.error as e:
raise ValueError(e)
self._connection.set_pull_up_down(self._number, self.GPIO_PULL_UPS[self._pull])
self._connection.set_glitch_filter(self._number, 0)
cls._PINS[(host, port, number)] = self
return self
def __repr__(self):
if self._host == 'localhost':
return "GPIO%d" % self._number
else:
return "GPIO%d on %s:%d" % (self._number, self._host, self._port)
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def number(self):
return self._number
def close(self):
# If we're shutting down, the connection may have disconnected itself
# already. Unfortunately, the connection's "connected" property is
# rather buggy - disconnecting doesn't set it to False! So we're
# naughty and check an internal variable instead...
if self._connection.sl.s is not None:
self.frequency = None
self.when_changed = None
self.function = 'input'
self.pull = 'up' if self._pi_info.pulled_up('GPIO%d' % self.number) else 'floating'
def _get_function(self):
return self.GPIO_FUNCTION_NAMES[self._connection.get_mode(self._number)]
def _set_function(self, value):
if value != 'input':
self._pull = 'floating'
try:
self._connection.set_mode(self._number, self.GPIO_FUNCTIONS[value])
except KeyError:
raise PinInvalidFunction('invalid function "%s" for pin %r' % (value, self))
def _get_state(self):
if self._pwm:
return (
self._connection.get_PWM_dutycycle(self._number) /
self._connection.get_PWM_range(self._number)
)
else:
return bool(self._connection.read(self._number))
def _set_state(self, value):
if self._pwm:
try:
value = int(value * self._connection.get_PWM_range(self._number))
if value != self._connection.get_PWM_dutycycle(self._number):
self._connection.set_PWM_dutycycle(self._number, value)
except pigpio.error:
raise PinInvalidState('invalid state "%s" for pin %r' % (value, self))
elif self.function == 'input':
raise PinSetInput('cannot set state of pin %r' % self)
else:
# write forces pin to OUTPUT, hence the check above
self._connection.write(self._number, bool(value))
def _get_pull(self):
return self._pull
def _set_pull(self, value):
if self.function != 'input':
raise PinFixedPull('cannot set pull on non-input pin %r' % self)
if value != 'up' and self._pi_info.pulled_up('GPIO%d' % self._number):
raise PinFixedPull('%r has a physical pull-up resistor' % self)
try:
self._connection.set_pull_up_down(self._number, self.GPIO_PULL_UPS[value])
self._pull = value
except KeyError:
raise PinInvalidPull('invalid pull "%s" for pin %r' % (value, self))
def _get_frequency(self):
if self._pwm:
return self._connection.get_PWM_frequency(self._number)
return None
def _set_frequency(self, value):
if not self._pwm and value is not None:
self._connection.set_PWM_frequency(self._number, value)
self._connection.set_PWM_range(self._number, 10000)
self._connection.set_PWM_dutycycle(self._number, 0)
self._pwm = True
elif self._pwm and value is not None:
if value != self._connection.get_PWM_frequency(self._number):
self._connection.set_PWM_frequency(self._number, value)
self._connection.set_PWM_range(self._number, 10000)
elif self._pwm and value is None:
self._connection.write(self._number, 0)
self._pwm = False
def _get_bounce(self):
return None if not self._bounce else self._bounce / 1000000
def _set_bounce(self, value):
if value is None:
value = 0
elif value < 0:
raise PinInvalidBounce('bounce must be 0 or greater')
self._connection.set_glitch_filter(self._number, int(value * 1000000))
def _get_edges(self):
return self.GPIO_EDGES_NAMES[self._edges]
def _set_edges(self, value):
f = self.when_changed
self.when_changed = None
try:
self._edges = self.GPIO_EDGES[value]
finally:
self.when_changed = f
def _get_when_changed(self):
if self._callback is None:
return None
return self._callback.callb.func
def _set_when_changed(self, value):
if self._callback is not None:
self._callback.cancel()
self._callback = None
if value is not None:
self._callback = self._connection.callback(
self._number, self._edges,
lambda gpio, level, tick: value())
@classmethod
def pi_info(
cls, host=os.getenv('PIGPIO_ADDR', 'localhost'),
port=int(os.getenv('PIGPIO_PORT', 8888))):
try:
connection, info = cls._CONNECTIONS[(host, port)]
except KeyError:
connection = pigpio.pi(host, port)
revision = '%04x' % connection.get_hardware_revision()
info = pi_info(revision)
cls._CONNECTIONS[(host, port)] = (connection, info)
return info
| bsd-3-clause |
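Because the constructor above defaults host and port from PIGPIO_ADDR and PIGPIO_PORT, a remote pin can also be selected through the environment; a hypothetical session follows (the address is illustrative, and the variable must be set before the module is imported, since the defaults are captured at class-definition time):

import os
os.environ['PIGPIO_ADDR'] = '192.168.0.2'   # assumed address of the Pi running pigpiod

from gpiozero.pins.pigpiod import PiGPIOPin
from gpiozero import LED

led = LED(PiGPIOPin(12))   # GPIO12 on the remote machine
led.blink()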
moehle/cvxpy_codegen | cvxpy_codegen/atoms/kron.py | 1 | 1098 | """
Copyright 2017 Nicholas Moehle
This file is part of CVXPY-CODEGEN.
CVXPY-CODEGEN is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY-CODEGEN is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY-CODEGEN. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy_codegen.param.expr_data import AtomData
import scipy.sparse as sp
def getdata_kron(expr, arg_data):
return [AtomData(expr, arg_data,
macro_name = "kron",
sparsity = sp.kron(arg_data[0].sparsity, arg_data[1].sparsity),
work_int = arg_data[0].sparsity.shape[1],
work_float = arg_data[0].sparsity.shape[1])]
| gpl-3.0 |
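The sparsity passed to AtomData above is simply the Kronecker product of the two argument sparsity patterns; a quick scipy illustration (the matrices are arbitrary examples):

import scipy.sparse as sp

A = sp.csr_matrix([[1, 0], [0, 1]])   # 2x2 pattern
B = sp.csr_matrix([[1, 1, 0]])        # 1x3 pattern
K = sp.kron(A, B)                     # 2x6 pattern; zeros in A blank out whole blocks of B
print(K.toarray())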
woobe/h2o | py/testdir_single_jvm/test_GLM2_tweedie.py | 2 | 2449 | import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_util
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_tweedie(self):
h2o.beta_features = True
csvFilename = "AutoClaim.csv"
csvPathname = 'standard/' + csvFilename
print "\nStarting", csvPathname
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put')
# columns start at 0
# regress: glm(CLM_AMT ~ CAR_USE + REVOLKED + GENDER + AREA + MARRIED + CAR_TYPE, data=AutoClaim, family=tweedie(1.34))
coefs = [7, 13, 20, 27, 21, 11]
y = "4"
ignored_cols = h2o_cmd.createIgnoredCols(key=parseResult['destination_key'], cols=coefs, response=y)
# sapply(c('CLM_AMT', 'CAR_USE', 'REVOLKED', 'GENDER', 'AREA', 'MARRIED', 'CAR_TYPE'), function(x) which(x==colnames(AutoClaim)) - 1)
kwargs = {
'family': 'tweedie',
'tweedie_variance_power': 1.36,
'response': y,
'ignored_cols' : ignored_cols,
'max_iter': 10,
'lambda': 0,
'alpha': 0,
'n_folds': 0,
'beta_epsilon': 1e-4,
}
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=15, **kwargs)
coefficientsExpected = {'Intercept': 0, 'GENDER.M': 0.0014842488782470984, 'CAR_TYPE.Sports Car': 0.07786742314454961, 'MARRIED.Yes': 0.0007748552195851079, 'CAR_TYPE.SUV': 0.07267702940249621, 'CAR_TYPE.Pickup': 0.04952083408742968, 'CAR_TYPE.Van': 0.026422137690691405, 'CAR_TYPE.Sedan': 0.05128350794060489, 'CAR_USE.Private': -0.03050194832853935, 'REVOLKED.Yes': -0.05095942737408699}
deltaExpected = 0.05
(warnings, coefficients, intercept) = h2o_glm.simpleCheckGLM(self, glm, None,
coefficientsExpected=coefficientsExpected, deltaExpected=deltaExpected, **kwargs)
print 'coefficients: %s' % (str(coefficients))
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
dfalt974/SickRage | lib/github/tests/Issue.py | 7 | 9092 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Stuart Glaser <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class Issue(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.repo = self.g.get_user().get_repo("PyGithub")
self.issue = self.repo.get_issue(28)
def testAttributes(self):
self.assertEqual(self.issue.assignee.login, "jacquev6")
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
self.assertEqual(self.issue.body, "Body edited by PyGithub")
self.assertEqual(self.issue.closed_at, datetime.datetime(2012, 5, 26, 14, 59, 33))
self.assertEqual(self.issue.closed_by.login, "jacquev6")
self.assertEqual(self.issue.comments, 0)
self.assertEqual(self.issue.created_at, datetime.datetime(2012, 5, 19, 10, 38, 23))
self.assertEqual(self.issue.html_url, "https://github.com/jacquev6/PyGithub/issues/28")
self.assertEqual(self.issue.id, 4653757)
self.assertListKeyEqual(self.issue.labels, lambda l: l.name, ["Bug", "Project management", "Question"])
self.assertEqual(self.issue.milestone.title, "Version 0.4")
self.assertEqual(self.issue.number, 28)
self.assertEqual(self.issue.pull_request.diff_url, None)
self.assertEqual(self.issue.pull_request.patch_url, None)
self.assertEqual(self.issue.pull_request.html_url, None)
self.assertEqual(self.issue.state, "closed")
self.assertEqual(self.issue.title, "Issue created by PyGithub")
self.assertEqual(self.issue.updated_at, datetime.datetime(2012, 5, 26, 14, 59, 33))
self.assertEqual(self.issue.url, "https://api.github.com/repos/jacquev6/PyGithub/issues/28")
self.assertEqual(self.issue.user.login, "jacquev6")
self.assertEqual(self.issue.repository.name, "PyGithub")
# test __repr__() based on this attributes
self.assertEqual(self.issue.__repr__(), 'Issue(title="Issue created by PyGithub", number=28)')
def testEditWithoutParameters(self):
self.issue.edit()
def testEditWithAllParameters(self):
user = self.g.get_user("jacquev6")
milestone = self.repo.get_milestone(2)
self.issue.edit("Title edited by PyGithub", "Body edited by PyGithub", user, "open", milestone, ["Bug"], ["jacquev6", "stuglaser"])
self.assertEqual(self.issue.assignee.login, "jacquev6")
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
self.assertEqual(self.issue.body, "Body edited by PyGithub")
self.assertEqual(self.issue.state, "open")
self.assertEqual(self.issue.title, "Title edited by PyGithub")
self.assertListKeyEqual(self.issue.labels, lambda l: l.name, ["Bug"])
def testEditResetMilestone(self):
self.assertEqual(self.issue.milestone.title, "Version 0.4")
self.issue.edit(milestone=None)
self.assertEqual(self.issue.milestone, None)
def testEditResetAssignee(self):
self.assertEqual(self.issue.assignee.login, "jacquev6")
self.issue.edit(assignee=None)
self.assertEqual(self.issue.assignee, None)
def testCreateComment(self):
comment = self.issue.create_comment("Comment created by PyGithub")
self.assertEqual(comment.id, 5808311)
def testGetComments(self):
self.assertListKeyEqual(self.issue.get_comments(), lambda c: c.user.login, ["jacquev6", "roskakori"])
def testGetCommentsSince(self):
self.assertListKeyEqual(self.issue.get_comments(datetime.datetime(2012, 5, 26, 13, 59, 33)), lambda c: c.user.login, ["jacquev6", "roskakori"])
def testGetEvents(self):
self.assertListKeyEqual(self.issue.get_events(), lambda e: e.id, [15819975, 15820048])
def testGetLabels(self):
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
def testAddAndRemoveAssignees(self):
user1 = "jayfk"
user2 = self.g.get_user("jzelinskie")
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
self.issue.add_to_assignees(user1, user2)
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser", "jayfk", "jzelinskie"])
self.issue.remove_from_assignees(user1, user2)
self.assertListKeyEqual(self.issue.assignees, lambda a: a.login, ["jacquev6", "stuglaser"])
def testAddAndRemoveLabels(self):
bug = self.repo.get_label("Bug")
question = self.repo.get_label("Question")
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.remove_from_labels(bug)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management", "Question"])
self.issue.remove_from_labels(question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management"])
self.issue.add_to_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
def testAddAndRemoveLabelsWithStringArguments(self):
bug = "Bug"
question = "Question"
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.remove_from_labels(bug)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management", "Question"])
self.issue.remove_from_labels(question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Project management"])
self.issue.add_to_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
def testDeleteAndSetLabels(self):
bug = self.repo.get_label("Bug")
question = self.repo.get_label("Question")
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.delete_labels()
self.assertListKeyEqual(self.issue.get_labels(), None, [])
self.issue.set_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Question"])
def testDeleteAndSetLabelsWithStringArguments(self):
bug = "Bug"
question = "Question"
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Project management", "Question"])
self.issue.delete_labels()
self.assertListKeyEqual(self.issue.get_labels(), None, [])
self.issue.set_labels(bug, question)
self.assertListKeyEqual(self.issue.get_labels(), lambda l: l.name, ["Bug", "Question"])
def testGetReactions(self):
reactions = self.issue.get_reactions()
self.assertEqual(reactions[0].content, "+1")
def testCreateReaction(self):
reaction = self.issue.create_reaction("hooray")
self.assertEqual(reaction.id, 16917472)
self.assertEqual(reaction.content, "hooray")
| gpl-3.0 |
tmpgit/intellij-community | python/lib/Lib/encodings/mbcs.py | 860 | 1211 | """ Python 'mbcs' Codec for Windows
Cloned by Mark Hammond ([email protected]) from ascii.py,
which was written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 |
xen0l/ansible | lib/ansible/modules/windows/win_shell.py | 28 | 4846 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_shell
short_description: Execute shell commands on target hosts
version_added: 2.2
description:
- The C(win_shell) module takes the command name followed by a list of space-delimited arguments.
It is similar to the M(win_command) module, but runs
the command via a shell (defaults to PowerShell) on the target host.
- For non-Windows targets, use the M(shell) module instead.
options:
free_form:
description:
- The C(win_shell) module takes a free form command to run.
- There is no parameter actually named 'free form'. See the examples!
required: yes
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: path
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
type: path
chdir:
description:
- Set the specified path as the current working directory before executing a command
type: path
executable:
description:
- Change the shell used to execute the command (eg, C(cmd)).
- The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
type: path
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: '2.5'
notes:
- If you want to run an executable securely and predictably, it may be
better to use the M(win_command) module instead. Best practices when writing
playbooks will follow the trend of using M(win_command) unless C(win_shell) is
explicitly required. When running ad-hoc commands, use your best judgement.
- WinRM will not return from a command execution until all child processes created have exited.
Thus, it is not possible to use C(win_shell) to spawn long-running child or background processes.
Consider creating a Windows service for managing background processes.
- For non-Windows targets, use the M(shell) module instead.
- See also M(win_command), M(raw)
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
# Execute a command in the remote shell; stdout goes to the specified
# file on the remote.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
# Change the working directory to somedir/ before executing the command.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt chdir=C:\somedir
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# somedir/somelog.txt doesn't exist.
- win_shell: C:\somescript.ps1 >> C:\somelog.txt
args:
chdir: C:\somedir
creates: C:\somelog.txt
# Run a command under a non-Powershell interpreter (cmd in this case)
- win_shell: echo %HOMEDIR%
args:
executable: cmd
register: homedir_out
- name: run multi-lined shell commands
win_shell: |
$value = Test-Path -Path C:\temp
if ($value) {
Remove-Item -Path C:\temp -Force
}
New-Item -Path C:\temp -ItemType Directory
- name: retrieve the input based on stdin
win_shell: '$string = [Console]::In.ReadToEnd(); Write-Output $string.Trim()'
args:
stdin: Input message
'''
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 |
draenog/gitolite-scripts | hooks/post-receive.d/gnome/git.py | 1 | 6496 | # Utility functions for git
#
# Copyright (C) 2008 Owen Taylor
# Copyright (C) 2009 Red Hat, Inc
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, If not, see
# http://www.gnu.org/licenses/.
#
# (These are adapted from git-bz)
import os
import re
from subprocess import Popen, PIPE
import sys
from util import die
# Clone of subprocess.CalledProcessError (not in Python 2.4)
class CalledProcessError(Exception):
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
NULL_REVISION = "0000000000000000000000000000000000000000"
# Run a git command
# Non-keyword arguments are passed verbatim as command line arguments
# Keyword arguments are turned into command line options
# <name>=True => --<name>
# <name>='<str>' => --<name>=<str>
# Special keyword arguments:
# _quiet: Discard all output even if an error occurs
# _interactive: Don't capture stdout and stderr
# _input=<str>: Feed <str> to stdinin of the command
# _outfile=<file): Use <file> as the output file descriptor
# _split_lines: Return an array with one string per returned line
#
def git_run(command, *args, **kwargs):
to_run = ['git', command.replace("_", "-")]
interactive = False
quiet = False
input = None
interactive = False
outfile = None
do_split_lines = False
for (k,v) in kwargs.items():
if k == '_quiet':
quiet = True
elif k == '_interactive':
interactive = True
elif k == '_input':
input = v
elif k == '_outfile':
outfile = v
elif k == '_split_lines':
do_split_lines = True
elif v is True:
if len(k) == 1:
to_run.append("-" + k)
else:
to_run.append("--" + k.replace("_", "-"))
else:
if len(k) == 1:
to_run.append("-" + k + v)
else:
to_run.append("--" + k.replace("_", "-") + "=" + v)
to_run.extend(args)
if outfile:
stdout = outfile
else:
if interactive:
stdout = None
else:
stdout = PIPE
if interactive:
stderr = None
else:
stderr = PIPE
if input != None:
stdin = PIPE
else:
stdin = None
process = Popen(to_run,
stdout=stdout, stderr=stderr, stdin=stdin)
output, error = process.communicate(input)
if process.returncode != 0:
if not quiet and not interactive:
print(error, end=' ', file=sys.stderr)
print(output, end=' ')
raise CalledProcessError(process.returncode, " ".join(to_run))
if interactive or outfile:
return None
else:
output = output.decode('utf8')
if do_split_lines:
return output.strip().splitlines()
else:
return output.strip()
# Wrapper to allow us to do git.<command>(...) instead of git_run()
class Git:
def __getattr__(self, command):
def f(*args, **kwargs):
return git_run(command, *args, **kwargs)
return f
git = Git()
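# Illustrative usage of the wrappers above (the argument values are examples,
# not from the original script):
#
#   git.rev_parse(git_dir=True)                 # runs: git rev-parse --git-dir
#   git.log("HEAD", n="5", _split_lines=True)   # runs: git log -n5 HEAD and
#                                               # returns the output lines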
class GitCommit:
def __init__(self, id, subject):
self.id = id
self.subject = subject
# Takes argument like 'git.rev_list()' and returns a list of commit objects
def rev_list_commits(*args, **kwargs):
kwargs_copy = dict(kwargs)
kwargs_copy['pretty'] = 'format:%s'
kwargs_copy['_split_lines'] = True
lines = git.rev_list(*args, **kwargs_copy)
if (len(lines) % 2 != 0):
        raise RuntimeError("git rev-list didn't return an even number of lines")
result = []
for i in range(0, len(lines), 2):
        m = re.match(r"commit\s+([A-Fa-f0-9]+)", lines[i])
        if not m:
            raise RuntimeError("Can't parse commit id '%s'" % lines[i])
commit_id = m.group(1)
subject = lines[i + 1]
result.append(GitCommit(commit_id, subject))
return result
# Loads a single commit object by ID
def load_commit(commit_id):
return rev_list_commits(commit_id + "^!")[0]
# Return True if the commit has multiple parents
def commit_is_merge(commit):
if isinstance(commit, str):
commit = load_commit(commit)
parent_count = 0
for line in git.cat_file("commit", commit.id, _split_lines=True):
if line == "":
break
if line.startswith("parent "):
parent_count += 1
return parent_count > 1
# Return a short one-line summary of the commit
def commit_oneline(commit):
if isinstance(commit, str):
commit = load_commit(commit)
return commit.id[0:7]+"... " + commit.subject[0:59]
# Return the directory name with .git stripped as a short identifier
# for the module
def get_module_name():
try:
git_dir = git.rev_parse(git_dir=True, _quiet=True)
except CalledProcessError:
die("GIT_DIR not set")
# Use the directory name with .git stripped as a short identifier
absdir = os.path.abspath(git_dir)
if absdir.endswith(os.sep + '.git'):
absdir = os.path.dirname(absdir)
projectshort = os.path.basename(absdir)
if projectshort.endswith(".git"):
projectshort = projectshort[:-4]
return projectshort
# Return the project description or '' if it is 'Unnamed repository;'
def get_project_description():
try:
git_dir = git.rev_parse(git_dir=True, _quiet=True)
except CalledProcessError:
die("GIT_DIR not set")
projectdesc = ''
description = os.path.join(git_dir, 'description')
if os.path.exists(description):
try:
projectdesc = open(description).read().strip()
except:
pass
if projectdesc.startswith('Unnamed repository;'):
projectdesc = ''
return projectdesc
| gpl-2.0 |
shibaniahegde/OpenStak_swift | swift/common/middleware/domain_remap.py | 20 | 6262 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Domain Remap Middleware
Middleware that translates container and account parts of a domain to
path parameters that the proxy server understands.
container.account.storageurl/object gets translated to
container.account.storageurl/path_root/account/container/object
account.storageurl/path_root/container/object gets translated to
account.storageurl/path_root/account/container/object
Browsers can convert a host header to lowercase, so check that reseller
prefix on the account is the correct case. This is done by comparing the
items in the reseller_prefixes config option to the found prefix. If they
match except for case, the item from reseller_prefixes will be used
instead of the found reseller prefix. When none match, the default reseller
prefix is used. When no default reseller prefix is configured, any request with
an account prefix not in that list will be ignored by this middleware.
reseller_prefixes defaults to 'AUTH'.
Note that this middleware requires that container names and account names
(except as described above) must be DNS-compatible. This means that the
account name created in the system and the containers created by users
cannot exceed 63 characters or have UTF-8 characters. These are
restrictions over and above what swift requires and are not explicitly
checked. Simply put, this middleware will do a best-effort attempt to
derive account and container names from elements in the domain name and
put those derived values into the URL path (leaving the Host header
unchanged).
Also note that using container sync with remapped domain names is not
advised. With container sync, you should use the true storage end points as
sync destinations.
"""
from swift.common.swob import Request, HTTPBadRequest
from swift.common.utils import list_from_csv, register_swift_info
class DomainRemapMiddleware(object):
"""
Domain Remap Middleware
See above for a full description.
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
self.app = app
self.storage_domain = conf.get('storage_domain', 'example.com')
if self.storage_domain and self.storage_domain[0] != '.':
self.storage_domain = '.' + self.storage_domain
self.path_root = conf.get('path_root', 'v1').strip('/')
prefixes = conf.get('reseller_prefixes', 'AUTH')
self.reseller_prefixes = list_from_csv(prefixes)
self.reseller_prefixes_lower = [x.lower()
for x in self.reseller_prefixes]
self.default_reseller_prefix = conf.get('default_reseller_prefix')
def __call__(self, env, start_response):
if not self.storage_domain:
return self.app(env, start_response)
if 'HTTP_HOST' in env:
given_domain = env['HTTP_HOST']
else:
given_domain = env['SERVER_NAME']
port = ''
if ':' in given_domain:
given_domain, port = given_domain.rsplit(':', 1)
if given_domain.endswith(self.storage_domain):
parts_to_parse = given_domain[:-len(self.storage_domain)]
parts_to_parse = parts_to_parse.strip('.').split('.')
len_parts_to_parse = len(parts_to_parse)
if len_parts_to_parse == 2:
container, account = parts_to_parse
elif len_parts_to_parse == 1:
container, account = None, parts_to_parse[0]
else:
resp = HTTPBadRequest(request=Request(env),
body='Bad domain in host header',
content_type='text/plain')
return resp(env, start_response)
if len(self.reseller_prefixes) > 0:
if '_' not in account and '-' in account:
account = account.replace('-', '_', 1)
account_reseller_prefix = account.split('_', 1)[0].lower()
if account_reseller_prefix in self.reseller_prefixes_lower:
prefix_index = self.reseller_prefixes_lower.index(
account_reseller_prefix)
real_prefix = self.reseller_prefixes[prefix_index]
if not account.startswith(real_prefix):
account_suffix = account[len(real_prefix):]
account = real_prefix + account_suffix
elif self.default_reseller_prefix:
# account prefix is not in config list. Add default one.
account = "%s_%s" % (self.default_reseller_prefix, account)
else:
# account prefix is not in config list. bail.
return self.app(env, start_response)
path = env['PATH_INFO'].strip('/')
new_path_parts = ['', self.path_root, account]
if container:
new_path_parts.append(container)
if path.startswith(self.path_root):
path = path[len(self.path_root):].lstrip('/')
if path:
new_path_parts.append(path)
new_path = '/'.join(new_path_parts)
env['PATH_INFO'] = new_path
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info(
'domain_remap',
default_reseller_prefix=conf.get('default_reseller_prefix'))
def domain_filter(app):
return DomainRemapMiddleware(app, conf)
return domain_filter
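# Illustrative paste.deploy configuration (values are placeholders, not part
# of this module):
#
#   [filter:domain_remap]
#   use = egg:swift#domain_remap
#   storage_domain = example.com
#   path_root = v1
#   reseller_prefixes = AUTH
#   # default_reseller_prefix = AUTH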
| apache-2.0 |
chafique-delli/OpenUpgrade | addons/l10n_ro/__openerp__.py | 40 | 1804 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 (<http://www.erpsystems.ro>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Romania - Accounting",
"version" : "1.0",
"author" : "TOTAL PC SYSTEMS",
"website": "http://www.erpsystems.ro",
"category" : "Localization/Account Charts",
"depends" : ['account','account_chart','base_vat'],
"description": """
This is the module to manage the accounting chart, VAT structure and Registration Number for Romania in OpenERP.
================================================================================================================
Romanian accounting chart and localization.
""",
"demo_xml" : [],
"data" : ['partner_view.xml','account_tax_code_template.xml','account_chart.xml','account_tax_template.xml','l10n_chart_ro_wizard.xml'],
"auto_install": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
git-keeper/git-keeper | git-keeper-server/gkeepserver/log_polling.py | 1 | 9468 | # Copyright 2016 Nathan Sommer and Ben Coleman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Provides a class and a global access point for a log polling object.
The module stores a LogPollingThread instance in the module-level variable
named log_poller. Call initialize() on this object as early as possible to set
it up, and then call start() to start the thread.
Files to be watched can be added to the polling object. New events from the
log are passed on to a parser and then an appropriate handler.
It is possible to add files to be watched before calling initialize(), but no
actions can be taken until the thread is initialized and started.
The sizes of the log files are stored in the database after every log
modification. This allows the poller to start where it left off if the process
is restarted.
Example usage::
    from gkeepserver.log_polling import log_poller
    # set up a LogFileReader subclass to use as reader_class
    def main():
        # set up other stuff
        log_poller.initialize(new_log_event_queue, reader_class, gkeepd_logger)
log_poller.start()
log_poller.watch_log_file('/path/to/log')
while keep_going:
log_file_path, log_event = new_log_event_queue.get()
# do something with the event
log_poller.shutdown()
"""
import json
import os
from queue import Queue, Empty
from threading import Thread
from time import time, sleep
from gkeepcore.gkeep_exception import GkeepException
from gkeepcore.log_file import LogFileReader, LogFileException
from gkeepcore.system_commands import file_is_readable
from gkeepserver.database import db
from gkeepserver.gkeepd_logger import GkeepdLoggerThread
class LogPollingThreadError(GkeepException):
"""Raised if there is an error polling log files."""
pass
class LogPollingThread(Thread):
"""
Watches log files for modifications.
New events from log files are put in a queue to be processed by another
thread.
See the module-level documentation for usage.
"""
def __init__(self):
"""
Constructor.
Create the _add_log_queue so that files to be watched can be added
before the thread is started. Initialize all other attributes to None.
Poller will be fully set up and ready to start after initialize() is
called.
"""
Thread.__init__(self)
# initialize this so we can add files to watch before the thread starts
self._add_log_queue = Queue()
self._new_log_event_queue = None
self._reader_class = None
self._polling_interval = None
self._last_poll_time = None
self._logger = None
self._log_file_readers = None
self._shutdown_flag = None
def initialize(self, new_log_event_queue: Queue, reader_class,
logger: GkeepdLoggerThread, polling_interval=0.5):
"""
Initialize the attributes.
:param new_log_event_queue: the poller places (file_path, event) pairs
into this queue
:param reader_class: LogFileReader class to use for creating readers
:param logger: the system logger, used to log runtime information
:param polling_interval: number of seconds between polling files
"""
self._new_log_event_queue = new_log_event_queue
self._reader_class = reader_class
self._polling_interval = polling_interval
self._last_poll_time = 0
self._logger = logger
# maps log file paths to log readers
self._log_file_readers = {}
self._load_paths_from_db()
self._shutdown_flag = False
def watch_log_file(self, file_path: str):
"""
Add a log file to be watched.
This method can be called from any other thread.
:param file_path: path to the log file
"""
if not file_is_readable(file_path):
error = '{} is not a readable file'.format(file_path)
raise GkeepException(error)
self._add_log_queue.put(file_path)
def shutdown(self):
"""
Shut down the poller.
The run loop will not shut down until the current polling cycle
is complete.
This method will block until the thread dies.
"""
self._shutdown_flag = True
self.join()
def run(self):
# Poll until _shutdown_flag is True.
#
# This should not be called directly, the thread should be started by
# calling start()
while not self._shutdown_flag:
try:
self._poll()
except Exception as e:
self._logger.log_error('Error polling logs: {0}'
.format(e))
def _load_paths_from_db(self):
for log_file_path, byte_count in db.get_byte_counts():
self._logger.log_debug('Watching {} from byte {}'
.format(log_file_path, byte_count))
self._create_and_add_reader(log_file_path,
seek_position=byte_count)
def _write_byte_counts_to_db(self):
# Writes all of the current file byte counts to the database.
# Called after updates.
byte_counts_by_file_path = {}
for file_path, reader in self._log_file_readers.items():
byte_counts_by_file_path[file_path] = reader.get_seek_position()
try:
db.update_byte_counts(byte_counts_by_file_path)
except GkeepException as e:
raise LogPollingThreadError('Error updating byte counts: {}'
.format(e))
def _write_byte_count_to_db(self, file_path):
# Writes a single file's byte count to the database.
update = {
file_path: self._log_file_readers[file_path].get_seek_position()
}
db.update_byte_counts(update)
def _start_watching_log_file(self, file_path: str):
# Start watching the file at file_path. This should only be called
# internally. Other threads should call watch_log_file()
try:
self._create_and_add_reader(file_path)
self._write_byte_count_to_db(file_path)
except GkeepException as e:
self._logger.log_warning(str(e))
def _create_and_add_reader(self, file_path: str, seek_position=None):
# Create a LogFileReader object for reading new data from the file
# and add it to the dictionary of readers.
# bail if the log does not exist
if not os.path.isfile(file_path):
warning = ('{0} does not exist and will not be watched'
.format(file_path))
self._logger.log_warning(warning)
return
reader = self._reader_class(file_path, seek_position=seek_position)
self._log_file_readers[file_path] = reader
def _stop_watching_log_file(self, log_file: LogFileReader):
# Simply remove the file reader from the dictionary
file_path = log_file.get_file_path()
del self._log_file_readers[file_path]
self._write_byte_count_to_db(file_path)
def _poll(self):
# Poll once for changes in files, and check the queue for new files
# to watch.
self._last_poll_time = time()
readers = list(self._log_file_readers.values())
# for each file reader, add any new events to the queue
for reader in readers:
try:
for event in reader.get_new_events():
file_path = reader.get_file_path()
self._new_log_event_queue.put((file_path, event))
self._write_byte_count_to_db(file_path)
except LogFileException as e:
self._logger.log_warning(str(e))
# if something goes wrong we should not keep watching this file
self._stop_watching_log_file(reader)
# consume all new log files until the queue is empty
try:
while True:
new_file_path = self._add_log_queue.get(block=False)
if isinstance(new_file_path, str):
self._start_watching_log_file(new_file_path)
else:
self._logger.log_warning('Log poller: {0} is not a string'
.format(new_file_path))
except Empty:
pass
# each file should be polled on average once per polling_interval
next_poll_time = self._last_poll_time + self._polling_interval
sleep_time = next_poll_time - time()
if sleep_time > 0:
sleep(sleep_time)
# module-level instance for global access
log_poller = LogPollingThread()
| agpl-3.0 |
ammaradil/fibonacci | Lib/site-packages/django/utils/module_loading.py | 145 | 6290 | import copy
import os
import sys
from importlib import import_module
from django.utils import six
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
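# Illustrative example (not part of the original module): assuming the dotted
# path exists in the installed Django version,
#
#   validate_email = import_string('django.core.validators.validate_email')
#
# imports django.core.validators and returns its validate_email attribute;
# a missing module or attribute raises ImportError instead.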
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
import_module('%s.%s' % (app_config.name, module_to_search))
except Exception:
# Reset the registry to the state before the last import
# as this import will have to reoccur on the next request and
# this could raise NotRegistered and AlreadyRegistered
# exceptions (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have the module in question, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
if six.PY3:
from importlib.util import find_spec as importlib_find
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
# package isn't a package.
return False
full_module_name = package_name + '.' + module_name
return importlib_find(full_module_name, package_path) is not None
else:
import imp
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
try:
# None indicates a cached miss; see mark_miss() in Python/import.c.
return sys.modules[name] is not None
except KeyError:
pass
try:
package_path = package.__path__ # No __path__, then not a package.
except AttributeError:
# Since the remainder of this function assumes that we're dealing with
# a package (module with a __path__), so if it's not, then bail here.
return False
for finder in sys.meta_path:
if finder.find_module(name, package_path):
return True
for entry in package_path:
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
def module_dir(module):
"""
Find the name of the directory that contains a module, if possible.
Raise ValueError otherwise, e.g. for namespace packages that are split
over several directories.
"""
# Convert to list because _NamespacePath does not support indexing on 3.3.
paths = list(getattr(module, '__path__', []))
if len(paths) == 1:
return paths[0]
else:
filename = getattr(module, '__file__', None)
if filename is not None:
return os.path.dirname(filename)
raise ValueError("Cannot determine directory containing %s" % module)
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pip/_vendor/requests/utils.py | 319 | 24163 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, InvalidHeader, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = 0
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
total_length = len(o.getvalue())
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
current_position = total_length
return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc. This weird `if...encode`` dance is
# used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
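# Illustrative example (not part of the original module):
#   get_encoding_from_headers({'content-type': 'text/html; charset=ISO-8859-5'})
# would return 'ISO-8859-5'; a bare 'text/html' falls back to 'ISO-8859-1'
# and a missing content-type returns None.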
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""This function allows you to check if on IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
elif ip == proxy_ip:
# If no_proxy ip was defined in plain IP notation instead of cidr notation &
# matches the IP of the index
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on macOS in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
:param url: The url being for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get('all', proxies.get(urlparts.scheme))
proxy_keys = [
'all://' + urlparts.hostname,
'all',
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
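# Illustrative example (not part of the original module): with
#   proxies = {'http': 'http://proxy:3128',
#              'all://example.com': 'http://special:8080'}
# select_proxy('http://example.com/path', proxies) would pick
# 'http://special:8080', because the host-specific 'all://' key is checked
# before the bare scheme key.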
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return '%s/%s' % (name, __version__)
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
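# Illustrative sketch (not part of the original module): the null-byte
# heuristic above applied to the same JSON document in three encodings.
def _example_guess_json_utf():
    assert guess_json_utf('{"a": 1}'.encode('utf-8')) == 'utf-8'
    assert guess_json_utf('{"a": 1}'.encode('utf-16-le')) == 'utf-16-le'
    assert guess_json_utf('{"a": 1}'.encode('utf-32-be')) == 'utf-32-be'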
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password.
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
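# Illustrative sketch (not part of the original module): credentials embedded
# in a URL are percent-decoded; URLs without credentials yield empty strings.
def _example_get_auth_from_url():
    assert get_auth_from_url('https://user:p%40ss@example.com/') == ('user', 'p@ss')
    assert get_auth_from_url('https://example.com/') == ('', '')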
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
"""
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Header value %s must be of type str or bytes, "
"not %s" % (value, type(value)))
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
| agpl-3.0 |
Jordy281/Tic_Tac_Toe_SuperComputer | game.py | 1 | 1938 | import numpy as np
import copy
from random import randrange
"""
We will check across the diagonal top left to bottom right,
This will allow us to check all possible solutions for a win
"""
def threecheck(board):
win=False
#Top Left
if board[0]!=0:
#Row T-L to T-R
if board[0]==board[1]:
#Top Right
if board[2]==board[1]:
win=True
#Column T-L to B-L
if board[0]==board[3]:
if board[3]==board[6]:
win=True
#Middle center
if board[4]!=0:
#Diagonal T-L to B-R
if board[4]==board[0]:
if board[4]==board[8]:
win=True
#Diagonal B-L to T-R
if board[4]==board[2]:
if board[4] ==board[6]:
win=True
#Column T-M to B-M
if board[4]==board[1]:
if board[4] == board[7]:
win=True
#Row C-L to C-R
if board[4]==board[3]:
if board[4]==board[5]:
win=True
#Bottom Right
if board[8]!=0:
#Column T-R to B-R
if board[8]==board[2]:
            #Middle right
if board[8]==board[5]:
win = True
#Row B-L to B-R
if board[8]==board[7]:
if board[8]==board[6]:
win=True
return win
"""
This will add the Move to the board
"""
def addMove(board, turn, index):
if turn%2==1:
board[index]=1
else:
board[index]=2
def gameOver(board, turn):
return threecheck(board) is True or turn==10
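"""
Illustrative sketch, not part of the original module: a minimal random-play
loop wiring threecheck, addMove and gameOver together. Cells hold 0 (empty),
1 (player one) or 2 (player two); turns are counted from 1.
"""
def exampleRandomGame():
    board = [0]*9
    turn = 1
    while not gameOver(board, turn):
        empty = [i for i, cell in enumerate(board) if cell == 0]
        addMove(board, turn, empty[randrange(len(empty))])
        turn += 1
    return board, threecheck(board)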
| mit |
xme1226/sahara | sahara/tests/unit/db/migration/test_migrations.py | 2 | 13912 | # Copyright 2014 OpenStack Foundation
# Copyright 2014 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly.
There are also "opportunistic" tests for both mysql and postgresql in here,
which allows testing against mysql and pg in a properly configured unit
test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
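For mysql an analogous setup (untested here, shown only as a sketch) is:
mysql -u root
mysql> create user 'openstack_citest'@'localhost'
       identified by 'openstack_citest';
mysql> create database openstack_citest;
mysql> grant all privileges on openstack_citest.*
       to 'openstack_citest'@'localhost';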
"""
import os
from oslo.config import cfg
from oslo.db.sqlalchemy import utils as db_utils
from sahara.tests.unit.db.migration import test_migrations_base as base
CONF = cfg.CONF
class TestMigrations(base.BaseWalkMigrationTestCase, base.CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
USER = "openstack_citest"
PASSWD = "openstack_citest"
DATABASE = "openstack_citest"
def __init__(self, *args, **kwargs):
super(TestMigrations, self).__init__(*args, **kwargs)
def setUp(self):
super(TestMigrations, self).setUp()
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnsExists(self, engine, table, columns):
for column in columns:
self.assertColumnExists(engine, table, column)
def assertColumnCount(self, engine, table, columns):
t = db_utils.get_table(engine, table)
self.assertEqual(len(t.columns), len(columns))
def assertColumnNotExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertIndexExists(self, engine, table, index):
t = db_utils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = db_utils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(members), sorted(index_columns))
def _pre_upgrade_001(self, engine):
# Anything returned from this method will be
# passed to corresponding _check_xxx method as 'data'.
pass
def _check_001(self, engine, data):
job_binary_internal_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'data',
'datasize'
]
self.assertColumnsExists(
engine, 'job_binary_internal', job_binary_internal_columns)
self.assertColumnCount(
engine, 'job_binary_internal', job_binary_internal_columns)
node_group_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'flavor_id',
'image_id',
'plugin_name',
'hadoop_version',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'floating_ip_pool'
]
self.assertColumnsExists(
engine, 'node_group_templates', node_group_templates_columns)
self.assertColumnCount(
engine, 'node_group_templates', node_group_templates_columns)
data_sources_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type',
'url',
'credentials'
]
self.assertColumnsExists(
engine, 'data_sources', data_sources_columns)
self.assertColumnCount(
engine, 'data_sources', data_sources_columns)
cluster_templates_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'cluster_configs',
'default_image_id',
'anti_affinity',
'tenant_id',
'neutron_management_network',
'plugin_name',
'hadoop_version'
]
self.assertColumnsExists(
engine, 'cluster_templates', cluster_templates_columns)
self.assertColumnCount(
engine, 'cluster_templates', cluster_templates_columns)
job_binaries_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'url',
'extra'
]
self.assertColumnsExists(
engine, 'job_binaries', job_binaries_columns)
self.assertColumnCount(
engine, 'job_binaries', job_binaries_columns)
jobs_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'description',
'type'
]
self.assertColumnsExists(engine, 'jobs', jobs_columns)
self.assertColumnCount(engine, 'jobs', jobs_columns)
templates_relations_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'name',
'flavor_id',
'image_id',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_template_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExists(
engine, 'templates_relations', templates_relations_columns)
self.assertColumnCount(
engine, 'templates_relations', templates_relations_columns)
mains_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExists(
engine, 'mains_association', mains_association_columns)
self.assertColumnCount(
engine, 'mains_association', mains_association_columns)
libs_association_columns = [
'Job_id',
'JobBinary_id'
]
self.assertColumnsExists(
engine, 'libs_association', libs_association_columns)
self.assertColumnCount(
engine, 'libs_association', libs_association_columns)
clusters_columns = [
'created_at',
'updated_at',
'id',
'name',
'description',
'tenant_id',
'trust_id',
'is_transient',
'plugin_name',
'hadoop_version',
'cluster_configs',
'default_image_id',
'neutron_management_network',
'anti_affinity',
'management_private_key',
'management_public_key',
'user_keypair_id',
'status',
'status_description',
'info',
'extra',
'cluster_template_id'
]
self.assertColumnsExists(engine, 'clusters', clusters_columns)
self.assertColumnCount(engine, 'clusters', clusters_columns)
node_groups_columns = [
'created_at',
'updated_at',
'id',
'name',
'tenant_id',
'flavor_id',
'image_id',
'image_username',
'node_processes',
'node_configs',
'volumes_per_node',
'volumes_size',
'volume_mount_prefix',
'count',
'cluster_id',
'node_group_template_id',
'floating_ip_pool'
]
self.assertColumnsExists(engine, 'node_groups', node_groups_columns)
self.assertColumnCount(engine, 'node_groups', node_groups_columns)
job_executions_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'job_id',
'input_id',
'output_id',
'start_time',
'end_time',
'cluster_id',
'info',
'progress',
'oozie_job_id',
'return_code',
'job_configs',
'extra'
]
self.assertColumnsExists(
engine, 'job_executions', job_executions_columns)
self.assertColumnCount(
engine, 'job_executions', job_executions_columns)
instances_columns = [
'created_at',
'updated_at',
'id',
'tenant_id',
'node_group_id',
'instance_id',
'instance_name',
'internal_ip',
'management_ip',
'volumes'
]
self.assertColumnsExists(engine, 'instances', instances_columns)
self.assertColumnCount(engine, 'instances', instances_columns)
self._data_001(engine, data)
def _data_001(self, engine, data):
datasize = 512 * 1024 # 512kB
data = os.urandom(datasize)
t = db_utils.get_table(engine, 'job_binary_internal')
engine.execute(t.insert(), data=data, id='123', name='name')
new_data = engine.execute(t.select()).fetchone().data
self.assertEqual(data, new_data)
engine.execute(t.delete())
def _check_002(self, engine, data):
# currently, 002 is just a placeholder
pass
def _check_003(self, engine, data):
# currently, 003 is just a placeholder
pass
def _check_004(self, engine, data):
# currently, 004 is just a placeholder
pass
def _check_005(self, engine, data):
# currently, 005 is just a placeholder
pass
def _check_006(self, engine, data):
# currently, 006 is just a placeholder
pass
def _pre_upgrade_007(self, engine):
desc = 'magic'
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='pname',
hadoop_version='1', management_private_key='2',
management_public_key='3', status_description=desc)
def _check_007(self, engine, data):
t = db_utils.get_table(engine, 'clusters')
res = engine.execute(t.select(), id='123').first()
self.assertEqual('magic', res['status_description'])
engine.execute(t.delete())
# check that status_description can keep 128kb.
        # MySQL varchar can not keep more than 64kb
desc = 'a' * 128 * 1024 # 128kb
t = db_utils.get_table(engine, 'clusters')
engine.execute(t.insert(), id='123', name='name', plugin_name='plname',
hadoop_version='hversion', management_private_key='1',
management_public_key='2', status_description=desc)
new_desc = engine.execute(t.select()).fetchone().status_description
self.assertEqual(desc, new_desc)
engine.execute(t.delete())
def _check_008(self, engine, date):
self.assertColumnExists(engine, 'node_group_templates',
'security_groups')
self.assertColumnExists(engine, 'node_groups', 'security_groups')
self.assertColumnExists(engine, 'templates_relations',
'security_groups')
def _check_009(self, engine, date):
self.assertColumnExists(engine, 'clusters', 'rollback_info')
def _check_010(self, engine, date):
self.assertColumnExists(engine, 'node_group_templates',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'auto_security_group')
self.assertColumnExists(engine, 'templates_relations',
'auto_security_group')
self.assertColumnExists(engine, 'node_groups', 'open_ports')
def _check_011(self, engine, date):
self.assertColumnExists(engine, 'clusters', 'sahara_info')
def _check_012(self, engine, date):
self.assertColumnExists(engine, 'node_group_templates',
'availability_zone')
self.assertColumnExists(engine, 'node_groups', 'availability_zone')
self.assertColumnExists(engine, 'templates_relations',
'availability_zone')
def _check_014(self, engine, data):
self.assertColumnExists(engine, 'node_group_templates', 'volume_type')
self.assertColumnExists(engine, 'node_groups', 'volume_type')
self.assertColumnExists(engine, 'templates_relations', 'volume_type')
| apache-2.0 |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/state/__init__.py | 1 | 27029 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to the sub-TLV of the extended
prefix LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__type")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/state/type (identityref)
YANG Description: The type of sub-TLV as indicated by the Extended Prefix LSA
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of sub-TLV as indicated by the Extended Prefix LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'EXTENDED_PREFIX_RANGE': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:EXTENDED_PREFIX_RANGE': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:EXTENDED_PREFIX_RANGE': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'PREFIX_SID': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:PREFIX_SID': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:PREFIX_SID': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'SID_LABEL_BINDING': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:SID_LABEL_BINDING': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:SID_LABEL_BINDING': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
type = __builtin__.property(_get_type)
_pyangbind_elements = OrderedDict([("type", type)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to the sub-TLV of the extended
prefix LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__type")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/state/type (identityref)
YANG Description: The type of sub-TLV as indicated by the Extended Prefix LSA
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of sub-TLV as indicated by the Extended Prefix LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'EXTENDED_PREFIX_RANGE': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:EXTENDED_PREFIX_RANGE': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:EXTENDED_PREFIX_RANGE': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'PREFIX_SID': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:PREFIX_SID': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:PREFIX_SID': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'SID_LABEL_BINDING': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:SID_LABEL_BINDING': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:SID_LABEL_BINDING': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}},), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:EXTENDED_PREFIX_RANGE": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:PREFIX_SID": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospf-types:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
"oc-ospft:SID_LABEL_BINDING": {
"@module": "openconfig-ospf-types",
"@namespace": "http://openconfig.net/yang/ospf-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
type = __builtin__.property(_get_type)
_pyangbind_elements = OrderedDict([("type", type)])
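# Illustrative sketch (not part of the generated bindings): how a consumer
# might inspect this container. The attribute access below follows the
# pyangbind conventions used above; anything else is an assumption.
def _example_inspect_state():
    tlv_state = state()
    # YANG path of the container within the openconfig-network-instance tree.
    path = tlv_state._path()
    # The read-only 'type' identityref stays empty until a backend populates it.
    return path, str(tlv_state.type)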
| apache-2.0 |
atosorigin/ansible | test/support/windows-integration/plugins/modules/win_lineinfile.py | 68 | 7333 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_lineinfile
short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in a file only.
version_added: "2.0"
options:
path:
description:
- The path of the file to modify.
- Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted.
- Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
type: path
required: yes
aliases: [ dest, destfile, name ]
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
regex:
description:
- The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found
will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions;
see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx).
aliases: [ "regexp" ]
state:
description:
- Whether the line should be there or not.
type: str
choices: [ absent, present ]
default: present
line:
description:
- Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
- Be aware that the line is processed first on the controller and thus is dependent on yaml quoting rules. Any double quoted line
will have control characters, such as '\r\n', expanded. To print such characters literally, use single or no quotes.
type: str
backrefs:
description:
- Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
type: bool
default: no
insertafter:
description:
- Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
type: str
choices: [ EOF, '*regex*' ]
default: EOF
insertbefore:
description:
    - Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A special value is available;
C(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
type: str
choices: [ BOF, '*regex*' ]
create:
description:
- Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing.
type: bool
default: no
validate:
description:
- Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like expansion and pipes won't work.
type: str
encoding:
description:
- Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause
the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
- An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method -
see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx).
- This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a
specific encoding, the default encoding (UTF-8, no BOM) will be used.
type: str
default: auto
newline:
description:
- Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated
line separator will be used for file output regardless of the original line separator that appears in the input file.
type: str
choices: [ unix, windows ]
default: windows
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
seealso:
- module: assemble
- module: lineinfile
author:
- Brian Lloyd (@brianlloyd)
'''
EXAMPLES = r'''
# Before Ansible 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- name: Insert path without converting \r\n
win_lineinfile:
path: c:\file.txt
line: c:\return\new
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^name='
line: 'name=JohnDoe'
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^name='
state: absent
- win_lineinfile:
path: C:\Temp\example.conf
regex: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
- win_lineinfile:
path: C:\Temp\httpd.conf
regex: '^Listen '
insertafter: '^#Listen '
line: Listen 8080
- win_lineinfile:
path: C:\Temp\services
regex: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
- name: Create file if it doesn't exist with a specific encoding
win_lineinfile:
path: C:\Temp\utf16.txt
create: yes
encoding: utf-16
line: This is a utf-16 encoded file
- name: Add a line to a file and ensure the resulting file uses unix line separators
win_lineinfile:
path: C:\Temp\testfile.txt
line: Line added to file
newline: unix
- name: Update a line using backrefs
win_lineinfile:
path: C:\Temp\example.conf
backrefs: yes
regex: '(^name=)'
line: '$1JohnDoe'
'''
RETURN = r'''
backup:
description:
- Name of the backup file that was created.
- This is now deprecated, use C(backup_file) instead.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
'''
| gpl-3.0 |
caser789/xuejiao-blog | app/api_1_0/posts.py | 1 | 1747 | from flask import jsonify, request, g, abort, url_for, current_app
from .. import db
from ..models import Post, Permission
from . import api
from .decorators import permission_required
from .errors import forbidden
@api.route('/posts/')
def get_posts():
page = request.args.get('page', 1, type=int)
pagination = Post.query.paginate(
page, per_page=current_app.config['BLOG_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_posts', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_posts', page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/posts/<int:id>')
def get_post(id):
post = Post.query.get_or_404(id)
return jsonify(post.to_json())
@api.route('/posts/', methods=['POST'])
@permission_required(Permission.WRITE_ARTICLES)
def new_post():
post = Post.from_json(request.json)
post.author = g.current_user
db.session.add(post)
db.session.commit()
return jsonify(post.to_json()), 201, \
{'Location': url_for('api.get_post', id=post.id, _external=True)}
@api.route('/posts/<int:id>', methods=['PUT'])
@permission_required(Permission.WRITE_ARTICLES)
def edit_post(id):
post = Post.query.get_or_404(id)
if g.current_user != post.author and \
not g.current_user.can(Permission.ADMINISTER):
return forbidden('Insufficient permissions')
post.body = request.json.get('body', post.body)
db.session.add(post)
return jsonify(post.to_json())
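# Illustrative sketch (not part of the original module): exercising the
# endpoints above with Flask's test client. The '/api/v1.0' URL prefix,
# the test client fixture and the authentication headers are assumptions
# about how this blueprint is registered in the surrounding project.
def _example_posts_roundtrip(client, auth_headers):
    import json
    rv = client.post('/api/v1.0/posts/',
                     data=json.dumps({'body': 'hello *world*'}),
                     content_type='application/json',
                     headers=auth_headers)
    assert rv.status_code == 201
    rv = client.get(rv.headers['Location'], headers=auth_headers)
    assert json.loads(rv.data.decode('utf-8'))['body'] == 'hello *world*'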
| mit |
wbyne/QGIS | python/plugins/processing/algs/qgis/HypsometricCurves.py | 2 | 8209 | # -*- coding: utf-8 -*-
"""
***************************************************************************
HypsometricCurves.py
---------------------
Date : November 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import numpy
from osgeo import gdal, ogr, osr
from qgis.core import QgsRectangle, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputDirectory
from processing.tools import raster, vector, dataobjects
class HypsometricCurves(GeoAlgorithm):
INPUT_DEM = 'INPUT_DEM'
BOUNDARY_LAYER = 'BOUNDARY_LAYER'
STEP = 'STEP'
USE_PERCENTAGE = 'USE_PERCENTAGE'
OUTPUT_DIRECTORY = 'OUTPUT_DIRECTORY'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Hypsometric curves')
self.group, self.i18n_group = self.trAlgorithm('Raster tools')
self.addParameter(ParameterRaster(self.INPUT_DEM,
self.tr('DEM to analyze')))
self.addParameter(ParameterVector(self.BOUNDARY_LAYER,
self.tr('Boundary layer'), dataobjects.TYPE_VECTOR_POLYGON))
self.addParameter(ParameterNumber(self.STEP,
self.tr('Step'), 0.0, 999999999.999999, 100.0))
self.addParameter(ParameterBoolean(self.USE_PERCENTAGE,
self.tr('Use % of area instead of absolute value'), False))
self.addOutput(OutputDirectory(self.OUTPUT_DIRECTORY,
self.tr('Hypsometric curves')))
def processAlgorithm(self, progress):
rasterPath = self.getParameterValue(self.INPUT_DEM)
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.BOUNDARY_LAYER))
step = self.getParameterValue(self.STEP)
percentage = self.getParameterValue(self.USE_PERCENTAGE)
outputPath = self.getOutputValue(self.OUTPUT_DIRECTORY)
rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
geoTransform = rasterDS.GetGeoTransform()
rasterBand = rasterDS.GetRasterBand(1)
noData = rasterBand.GetNoDataValue()
cellXSize = abs(geoTransform[1])
cellYSize = abs(geoTransform[5])
rasterXSize = rasterDS.RasterXSize
rasterYSize = rasterDS.RasterYSize
rasterBBox = QgsRectangle(geoTransform[0], geoTransform[3] - cellYSize
* rasterYSize, geoTransform[0] + cellXSize
* rasterXSize, geoTransform[3])
rasterGeom = QgsGeometry.fromRect(rasterBBox)
crs = osr.SpatialReference()
crs.ImportFromProj4(str(layer.crs().toProj4()))
memVectorDriver = ogr.GetDriverByName('Memory')
memRasterDriver = gdal.GetDriverByName('MEM')
features = vector.features(layer)
total = 100.0 / len(features)
for current, f in enumerate(features):
geom = f.geometry()
intersectedGeom = rasterGeom.intersection(geom)
if intersectedGeom.isGeosEmpty():
progress.setInfo(
self.tr('Feature %d does not intersect raster or '
'entirely located in NODATA area' % f.id()))
continue
fName = os.path.join(
outputPath, 'hystogram_%s_%s.csv' % (layer.name(), f.id()))
ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.exportToWkt())
bbox = intersectedGeom.boundingBox()
xMin = bbox.xMinimum()
xMax = bbox.xMaximum()
yMin = bbox.yMinimum()
yMax = bbox.yMaximum()
(startColumn, startRow) = raster.mapToPixel(xMin, yMax, geoTransform)
(endColumn, endRow) = raster.mapToPixel(xMax, yMin, geoTransform)
width = endColumn - startColumn
height = endRow - startRow
srcOffset = (startColumn, startRow, width, height)
srcArray = rasterBand.ReadAsArray(*srcOffset)
if srcOffset[2] == 0 or srcOffset[3] == 0:
progress.setInfo(
self.tr('Feature %d is smaller than raster '
'cell size' % f.id()))
continue
newGeoTransform = (
geoTransform[0] + srcOffset[0] * geoTransform[1],
geoTransform[1],
0.0,
geoTransform[3] + srcOffset[1] * geoTransform[5],
0.0,
geoTransform[5]
)
memVDS = memVectorDriver.CreateDataSource('out')
memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)
ft = ogr.Feature(memLayer.GetLayerDefn())
ft.SetGeometry(ogrGeom)
memLayer.CreateFeature(ft)
ft.Destroy()
rasterizedDS = memRasterDriver.Create('', srcOffset[2],
srcOffset[3], 1, gdal.GDT_Byte)
rasterizedDS.SetGeoTransform(newGeoTransform)
gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
rasterizedArray = rasterizedDS.ReadAsArray()
srcArray = numpy.nan_to_num(srcArray)
masked = numpy.ma.MaskedArray(srcArray,
mask=numpy.logical_or(srcArray == noData,
numpy.logical_not(rasterizedArray)))
self.calculateHypsometry(f.id(), fName, progress, masked,
cellXSize, cellYSize, percentage, step)
memVDS = None
rasterizedDS = None
progress.setPercentage(int(current * total))
rasterDS = None
def calculateHypsometry(self, fid, fName, progress, data, pX, pY,
percentage, step):
out = dict()
d = data.compressed()
if d.size == 0:
progress.setInfo(
self.tr('Feature %d does not intersect raster or '
'entirely located in NODATA area' % fid))
return
minValue = d.min()
maxValue = d.max()
startValue = minValue
tmpValue = minValue + step
while startValue < maxValue:
out[tmpValue] = ((startValue <= d) & (d < tmpValue)).sum()
startValue = tmpValue
tmpValue += step
if percentage:
multiplier = 100.0 / len(d.flat)
else:
multiplier = pX * pY
for k, v in out.items():
out[k] = v * multiplier
prev = None
for i in sorted(out.items()):
if prev is None:
out[i[0]] = i[1]
else:
out[i[0]] = i[1] + out[prev]
prev = i[0]
writer = vector.TableWriter(fName, 'utf-8', [self.tr('Area'), self.tr('Elevation')])
for i in sorted(out.items()):
writer.addRecord([i[1], i[0]])
del writer
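# Illustrative sketch (not part of the original algorithm): the cumulative
# area-vs-elevation binning performed by calculateHypsometry, reduced to
# plain numpy so it can be tried without a QGIS install. The elevations,
# cell area and step below are arbitrary example values.
def _example_hypsometry_bins(cell_area=25.0, step=10.0):
    data = numpy.array([102.0, 108.0, 115.0, 121.0, 134.0])
    out = {}
    start = data.min()
    upper = start + step
    while start < data.max():
        out[upper] = ((start <= data) & (data < upper)).sum() * cell_area
        start = upper
        upper += step
    # Accumulate so each entry holds the total area below that elevation.
    total = 0.0
    for level in sorted(out):
        total += out[level]
        out[level] = total
    return out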
| gpl-2.0 |
docker/docker-py | tests/integration/api_swarm_test.py | 4 | 9438 | import copy
import docker
import pytest
from ..helpers import force_leave_swarm, requires_api_version
from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
force_leave_swarm(self.client)
self._unlock_key = None
def tearDown(self):
try:
if self._unlock_key:
self.client.unlock_swarm(self._unlock_key)
except docker.errors.APIError:
pass
force_leave_swarm(self.client)
super(SwarmTest, self).tearDown()
@requires_api_version('1.24')
def test_init_swarm_simple(self):
assert self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_force_new_cluster(self):
pytest.skip('Test stalls the engine on 1.12.0')
assert self.init_swarm()
version_1 = self.client.inspect_swarm()['Version']['Index']
assert self.client.init_swarm(force_new_cluster=True)
version_2 = self.client.inspect_swarm()['Version']['Index']
assert version_2 != version_1
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_defaults(self):
assert self.init_swarm()
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
assert results['SubnetSize'] == 24
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_only_pool(self):
assert self.init_swarm(default_addr_pool=['2.0.0.0/16'])
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'2.0.0.0/16'}
assert results['SubnetSize'] == 24
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_only_subnet_size(self):
assert self.init_swarm(subnet_size=26)
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'10.0.0.0/8'}
assert results['SubnetSize'] == 26
@requires_api_version('1.39')
def test_init_swarm_custom_addr_pool_both_args(self):
assert self.init_swarm(default_addr_pool=['2.0.0.0/16', '3.0.0.0/16'],
subnet_size=28)
results = self.client.inspect_swarm()
assert set(results['DefaultAddrPool']) == {'2.0.0.0/16', '3.0.0.0/16'}
assert results['SubnetSize'] == 28
@requires_api_version('1.24')
def test_init_already_in_cluster(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError):
self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_custom_raft_spec(self):
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
@requires_api_version('1.30')
def test_init_swarm_with_ca_config(self):
spec = self.client.create_swarm_spec(
node_cert_expiry=7776000000000000, ca_force_rotate=6000000000000
)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['CAConfig']['NodeCertExpiry'] == (
spec['CAConfig']['NodeCertExpiry']
)
assert swarm_info['Spec']['CAConfig']['ForceRotate'] == (
spec['CAConfig']['ForceRotate']
)
@requires_api_version('1.25')
def test_init_swarm_with_autolock_managers(self):
spec = self.client.create_swarm_spec(autolock_managers=True)
assert self.init_swarm(swarm_spec=spec)
# save unlock key for tearDown
self._unlock_key = self.client.get_unlock_key()
swarm_info = self.client.inspect_swarm()
assert (
swarm_info['Spec']['EncryptionConfig']['AutoLockManagers'] is True
)
assert self._unlock_key.get('UnlockKey')
@requires_api_version('1.25')
@pytest.mark.xfail(
reason="This doesn't seem to be taken into account by the engine"
)
def test_init_swarm_with_log_driver(self):
spec = {'TaskDefaults': {'LogDriver': {'Name': 'syslog'}}}
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['TaskDefaults']['LogDriver']['Name'] == (
'syslog'
)
@requires_api_version('1.24')
def test_leave_swarm(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.leave_swarm()
        assert exc_info.value.response.status_code == 500
assert self.client.leave_swarm(force=True)
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.inspect_swarm()
        assert exc_info.value.response.status_code == 406
assert self.client.leave_swarm(force=True)
@requires_api_version('1.24')
def test_update_swarm(self):
assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200,
node_cert_expiry=7776000000000000
)
assert self.client.update_swarm(
version=swarm_info_1['Version']['Index'],
swarm_spec=spec, rotate_worker_token=True
)
swarm_info_2 = self.client.inspect_swarm()
assert (
swarm_info_1['Version']['Index'] !=
swarm_info_2['Version']['Index']
)
assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
assert (
swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
)
assert (
swarm_info_1['JoinTokens']['Manager'] ==
swarm_info_2['JoinTokens']['Manager']
)
assert (
swarm_info_1['JoinTokens']['Worker'] !=
swarm_info_2['JoinTokens']['Worker']
)
@requires_api_version('1.24')
def test_list_nodes(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
assert 'ID' in node
assert 'Spec' in node
assert node['Spec']['Role'] == 'manager'
filtered_list = self.client.nodes(filters={
'id': node['ID']
})
assert len(filtered_list) == 1
filtered_list = self.client.nodes(filters={
'role': 'worker'
})
assert len(filtered_list) == 0
@requires_api_version('1.24')
def test_inspect_node(self):
node_id = self.init_swarm()
assert node_id
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
node_data = self.client.inspect_node(node['ID'])
assert node['ID'] == node_data['ID']
assert node_id == node['ID']
assert node['Version'] == node_data['Version']
@requires_api_version('1.24')
def test_update_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node = nodes_list[0]
orig_spec = node['Spec']
# add a new label
new_spec = copy.deepcopy(orig_spec)
new_spec['Labels'] = {'new.label': 'new value'}
self.client.update_node(node_id=node['ID'],
version=node['Version']['Index'],
node_spec=new_spec)
updated_node = self.client.inspect_node(node['ID'])
assert new_spec == updated_node['Spec']
# Revert the changes
self.client.update_node(node_id=node['ID'],
version=updated_node['Version']['Index'],
node_spec=orig_spec)
reverted_node = self.client.inspect_node(node['ID'])
assert orig_spec == reverted_node['Spec']
@requires_api_version('1.24')
def test_remove_main_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node_id = nodes_list[0]['ID']
with pytest.raises(docker.errors.NotFound):
self.client.remove_node('foobar01')
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id)
assert e.value.response.status_code >= 400
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id, True)
assert e.value.response.status_code >= 400
@requires_api_version('1.25')
def test_rotate_manager_unlock_key(self):
spec = self.client.create_swarm_spec(autolock_managers=True)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
key_1 = self.client.get_unlock_key()
assert self.client.update_swarm(
version=swarm_info['Version']['Index'],
rotate_manager_unlock_key=True
)
key_2 = self.client.get_unlock_key()
assert key_1['UnlockKey'] != key_2['UnlockKey']
@requires_api_version('1.30')
@pytest.mark.xfail(reason='Can fail if eth0 has multiple IP addresses')
def test_init_swarm_data_path_addr(self):
assert self.init_swarm(data_path_addr='eth0')
| apache-2.0 |
sstruct/flasky | tests/test_api.py | 23 | 10686 | import unittest
import json
import re
from base64 import b64encode
from flask import url_for
from app import create_app, db
from app.models import User, Role, Post, Comment
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def get_api_headers(self, username, password):
return {
'Authorization': 'Basic ' + b64encode(
(username + ':' + password).encode('utf-8')).decode('utf-8'),
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def test_404(self):
response = self.client.get(
'/wrong/url',
headers=self.get_api_headers('email', 'password'))
self.assertTrue(response.status_code == 404)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['error'] == 'not found')
def test_no_auth(self):
response = self.client.get(url_for('api.get_posts'),
content_type='application/json')
self.assertTrue(response.status_code == 200)
def test_bad_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# authenticate with bad password
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 401)
def test_token_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# issue a request with a bad token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('bad-token', ''))
self.assertTrue(response.status_code == 401)
# get a token
response = self.client.get(
url_for('api.get_token'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('token'))
token = json_response['token']
# issue a request with the token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers(token, ''))
self.assertTrue(response.status_code == 200)
def test_anonymous(self):
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('', ''))
self.assertTrue(response.status_code == 200)
def test_unconfirmed_account(self):
# add an unconfirmed user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=False,
role=r)
db.session.add(u)
db.session.commit()
# get list of posts with the unconfirmed account
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 403)
def test_posts(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# write an empty post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': ''}))
self.assertTrue(response.status_code == 400)
# write a post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': 'body of the *blog* post'}))
self.assertTrue(response.status_code == 201)
url = response.headers.get('Location')
self.assertIsNotNone(url)
# get the new post
response = self.client.get(
url,
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'body of the *blog* post')
self.assertTrue(json_response['body_html'] ==
'<p>body of the <em>blog</em> post</p>')
json_post = json_response
# get the post from the user
response = self.client.get(
url_for('api.get_user_posts', id=u.id),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
self.assertTrue(json_response['posts'][0] == json_post)
# get the post from the user as a follower
response = self.client.get(
url_for('api.get_user_followed_posts', id=u.id),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
self.assertTrue(json_response['posts'][0] == json_post)
# edit post
response = self.client.put(
url,
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': 'updated body'}))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'updated body')
self.assertTrue(json_response['body_html'] == '<p>updated body</p>')
def test_users(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='[email protected]', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='[email protected]', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# get users
response = self.client.get(
url_for('api.get_user', id=u1.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'john')
response = self.client.get(
url_for('api.get_user', id=u2.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'susan')
def test_comments(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='[email protected]', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='[email protected]', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# add a post
post = Post(body='body of the post', author=u1)
db.session.add(post)
db.session.commit()
# write a comment
response = self.client.post(
url_for('api.new_post_comment', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'),
data=json.dumps({'body': 'Good [post](http://example.com)!'}))
self.assertTrue(response.status_code == 201)
json_response = json.loads(response.data.decode('utf-8'))
url = response.headers.get('Location')
self.assertIsNotNone(url)
self.assertTrue(json_response['body'] ==
'Good [post](http://example.com)!')
self.assertTrue(
re.sub('<.*?>', '', json_response['body_html']) == 'Good post!')
# get the new comment
response = self.client.get(
url,
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] ==
'Good [post](http://example.com)!')
# add another comment
comment = Comment(body='Thank you!', author=u1, post=post)
db.session.add(comment)
db.session.commit()
# get the two comments from the post
response = self.client.get(
url_for('api.get_post_comments', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 2)
# get all the comments
response = self.client.get(
url_for('api.get_comments', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 2)
| mit |
codingforfun/Olena-Mirror | swilena/python/box2d-misc.py | 2 | 1329 | #! /usr/bin/env python
# Copyright (C) 2009 EPITA Research and Development Laboratory (LRDE)
#
# This file is part of Olena.
#
# Olena is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, version 2 of the License.
#
# Olena is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Olena. If not, see <http://www.gnu.org/licenses/>.
from swilena import *
# Generic iterator interface.
b = box2d(2, 3)
p = iter(b)
while p.is_valid():
print p.site()
p.advance()
print
# Python's iterator interface.
# We cannot use
#
# for p in box2d(2, 3):
# print p
#
# here because the box2d is a temporary object that may be collected
# before the end of the iteration. To prevent Python from disposing
# of it, we use a named variable that will
#
# Another possibility would be to have a generator playing with the
# `thisown' field of the box, to prevent its destruction (see
# http://www.swig.org/Doc1.3/SWIGDocumentation.html#Python_nn30).
for p in b:
print p
| gpl-2.0 |
Beauhurst/django | django/contrib/admin/migrations/0001_initial.py | 95 | 1893 | import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
('change_message', models.TextField(verbose_name='change message', blank=True)),
('content_type', models.ForeignKey(
to_field='id',
on_delete=models.SET_NULL,
blank=True, null=True,
to='contenttypes.ContentType',
verbose_name='content type',
)),
('user', models.ForeignKey(
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name='user',
)),
],
options={
'ordering': ('-action_time',),
'db_table': 'django_admin_log',
'verbose_name': 'log entry',
'verbose_name_plural': 'log entries',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.admin.models.LogEntryManager()),
],
),
]
| bsd-3-clause |
vadimtk/chrome4sdp | tools/telemetry/telemetry/internal/platform/power_monitor/msr_power_monitor_unittest.py | 24 | 1071 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import unittest
from telemetry import decorators
from telemetry.internal.platform.power_monitor import msr_power_monitor
from telemetry.internal.platform import win_platform_backend
class MsrPowerMonitorTest(unittest.TestCase):
@decorators.Enabled('xp', 'win7', 'win8') # http://crbug.com/479337
def testMsrRuns(self):
platform_backend = win_platform_backend.WinPlatformBackend()
power_monitor = msr_power_monitor.MsrPowerMonitorWin(platform_backend)
if not power_monitor.CanMonitorPower():
logging.warning('Test not supported on this platform.')
return
power_monitor.StartMonitoringPower(None)
time.sleep(0.01)
statistics = power_monitor.StopMonitoringPower()
self.assertEqual(statistics['identifier'], 'msr')
self.assertIn('energy_consumption_mwh', statistics)
self.assertGreater(statistics['energy_consumption_mwh'], 0)
| bsd-3-clause |
alexallah/django | django/forms/widgets.py | 1 | 35356 | """
HTML Widget classes
"""
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats
from django.utils.dates import MONTHS
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from .renderers import get_default_renderer
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
@html_safe
class Media:
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain.from_iterable([
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media)
def absolute_path(self, path):
"""
Given a relative or absolute path to a static asset, return an absolute
path. An absolute path will be returned unchanged while a relative path
will be passed to django.templatetags.static.static().
"""
if path.startswith(('http://', 'https://', '/')):
return path
return static(path)
def __getitem__(self, name):
"""Return a Media object that only contains media of the given type."""
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
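# Illustrative sketch (added comment, not part of the original module): a
# widget subclass can declare its static assets through an inner ``Media``
# class, which the metaclass above exposes as a combined ``media`` property,
# e.g.:
#
#   class CalendarWidget(TextInput):
#       class Media:
#           css = {'all': ('calendar.css',)}
#           js = ('calendar.js',)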
class Widget(metaclass=MediaDefiningClass):
    needs_multipart_form = False  # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None):
context = self.get_context(name, value, attrs)
yield context['widget']
def format_value(self, value):
"""
Return a value as it should appear when rendered in a template.
"""
if value == '' or value is None:
return None
if self.is_localized:
return formats.localize_input(value)
return str(value)
def get_context(self, name, value, attrs):
context = {}
context['widget'] = {
'name': name,
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': self.build_attrs(self.attrs, attrs),
'template_name': self.template_name,
}
return context
def render(self, name, value, attrs=None, renderer=None):
"""Render the widget as an HTML string."""
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer)
def _render(self, template_name, context, renderer=None):
if renderer is None:
renderer = get_default_renderer()
return mark_safe(renderer.render(template_name, context))
def build_attrs(self, base_attrs, extra_attrs=None):
"""Build an attribute dictionary."""
attrs = base_attrs.copy()
if extra_attrs is not None:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, return the value
of this widget or None if it's not provided.
"""
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in data
def id_for_label(self, id_):
"""
Return the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Return None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
def use_required_attribute(self, initial):
return not self.is_hidden
class Input(Widget):
"""
Base class for all <input> widgets.
"""
input_type = None # Subclasses must define this.
template_name = 'django/forms/widgets/input.html'
def __init__(self, attrs=None):
if attrs is not None:
attrs = attrs.copy()
self.input_type = attrs.pop('type', self.input_type)
super().__init__(attrs)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['type'] = self.input_type
return context
class TextInput(Input):
input_type = 'text'
template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
input_type = 'number'
template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
input_type = 'email'
template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
input_type = 'url'
template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
input_type = 'password'
template_name = 'django/forms/widgets/password.html'
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
"""
Handle <input type="hidden"> for fields that have a list
of values.
"""
template_name = 'django/forms/widgets/multiple_hidden.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
final_attrs = context['widget']['attrs']
id_ = context['widget']['attrs'].get('id')
subwidgets = []
for index, value_ in enumerate(context['widget']['value']):
widget_attrs = final_attrs.copy()
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
widget_attrs['id'] = '%s_%s' % (id_, index)
widget = HiddenInput()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def format_value(self, value):
return [] if value is None else value
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
template_name = 'django/forms/widgets/file.html'
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
clear_checkbox_label = _('Clear')
initial_text = _('Currently')
input_text = _('Change')
template_name = 'django/forms/widgets/clearable_file_input.html'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be initial value.
"""
return bool(value and getattr(value, 'url', False))
def format_value(self, value):
"""
Return the file object if it has a defined url attribute.
"""
if self.is_initial(value):
return value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
context.update({
'checkbox_name': checkbox_name,
'checkbox_id': checkbox_id,
'is_initial': self.is_initial(value),
'input_text': self.input_text,
'initial_text': self.initial_text,
'clear_checkbox_label': self.clear_checkbox_label,
})
return context
def value_from_datadict(self, data, files, name):
upload = super().value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
def value_omitted_from_data(self, data, files, name):
return (
super().value_omitted_from_data(data, files, name) and
self.clear_checkbox_name(name) not in data
)
class Textarea(Widget):
template_name = 'django/forms/widgets/textarea.html'
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super().__init__(attrs)
self.format = format if format else None
def format_value(self, value):
return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Input):
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox.html'
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == '':
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
if attrs is None:
attrs = {}
attrs['checked'] = True
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super().__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
yield from self.options(name, value, attrs)
    def options(self, name, value, attrs=None):
        """Yield a flat list of options for this widget."""
for group in self.optgroups(name, value, attrs):
yield from group[1]
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
default = (None, [], 0)
groups = [default]
has_selected = False
for option_value, option_label in chain(self.choices):
if option_value is None:
option_value = ''
if isinstance(option_label, (list, tuple)):
index = groups[-1][2] + 1
subindex = 0
subgroup = []
groups.append((option_value, subgroup, index))
choices = option_label
else:
index = len(default[1])
subgroup = default[1]
subindex = None
choices = [(option_value, option_label)]
for subvalue, sublabel in choices:
selected = (
str(subvalue) in value and
(not has_selected or self.allow_multiple_selected)
)
if selected and not has_selected:
has_selected = True
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': str(value),
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
}
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
context['wrap_label'] = True
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
try:
getter = data.getlist
except AttributeError:
pass
return getter(name)
def format_value(self, value):
"""Return selected values as a list."""
if not isinstance(value, (tuple, list)):
value = [value]
return [str(v) if v is not None else '' for v in value]
class Select(ChoiceWidget):
input_type = 'select'
template_name = 'django/forms/widgets/select.html'
option_template_name = 'django/forms/widgets/select_option.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.allow_multiple_selected:
context['widget']['attrs']['multiple'] = 'multiple'
return context
@staticmethod
def _choice_has_empty_value(choice):
"""Return True if the choice's value is empty string or None."""
value, _ = choice
return (isinstance(value, str) and not bool(value)) or value is None
def use_required_attribute(self, initial):
"""
Don't render 'required' if the first <option> has a value, as that's
invalid HTML.
"""
use_required_attribute = super().use_required_attribute(initial)
# 'required' is always okay for <select multiple>.
if self.allow_multiple_selected:
return use_required_attribute
first_choice = next(iter(self.choices), None)
return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (
('1', _('Unknown')),
('2', _('Yes')),
('3', _('No')),
)
super().__init__(attrs, choices)
def format_value(self, value):
try:
return {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
return '1'
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {
'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False,
}.get(value)
class SelectMultiple(Select):
allow_multiple_selected = True
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def value_omitted_from_data(self, data, files, name):
# An unselected <select multiple> doesn't appear in POST data, so it's
# never known if the value is actually omitted.
return False
class RadioSelect(ChoiceWidget):
input_type = 'radio'
template_name = 'django/forms/widgets/radio.html'
option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
allow_multiple_selected = True
input_type = 'checkbox'
template_name = 'django/forms/widgets/checkbox_select.html'
option_template_name = 'django/forms/widgets/checkbox_option.html'
def use_required_attribute(self, initial):
# Don't use the 'required' attribute because browser validation would
# require all checkboxes to be checked instead of at least one.
return False
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
    def id_for_label(self, id_, index=None):
        """
Don't include for="field_0" in <label> because clicking such a label
would toggle the first checkbox.
"""
if index is None:
return ''
return super().id_for_label(id_, index)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
In addition to the values added by Widget.get_context(), this widget
adds a list of subwidgets to the context as widget['subwidgets'].
These can be looped over and rendered like normal widgets.
You'll probably want to use this class with MultiValueField.
"""
template_name = 'django/forms/widgets/multiwidget.html'
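    # Illustrative sketch (added comment): a minimal subclass typically wires
    # up its subwidgets in __init__ and implements decompress(), e.g.:
    #
    #   class SplitNameWidget(MultiWidget):
    #       def __init__(self, attrs=None):
    #           super().__init__([TextInput(), TextInput()], attrs)
    #
    #       def decompress(self, value):
    #           return value.split(' ', 1) if value else [None, None]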
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super().__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = context['widget']['attrs']
input_type = final_attrs.pop('type', None)
id_ = final_attrs.get('id')
subwidgets = []
for i, widget in enumerate(self.widgets):
if input_type is not None:
widget.input_type = input_type
widget_name = '%s_%s' % (name, i)
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
widget_attrs = final_attrs.copy()
widget_attrs['id'] = '%s_%s' % (id_, i)
else:
widget_attrs = final_attrs
subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def id_for_label(self, id_):
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def value_omitted_from_data(self, data, files, name):
return all(
widget.value_omitted_from_data(data, files, name + '_%s' % i)
for i, widget in enumerate(self.widgets)
)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"""
Media for a multiwidget is the combination of all media of the
subwidgets.
"""
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
template_name = 'django/forms/widgets/splitdatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
widgets = (
DateInput(
attrs=attrs if date_attrs is None else date_attrs,
format=date_format,
),
TimeInput(
attrs=attrs if time_attrs is None else time_attrs,
format=time_format,
),
)
super().__init__(widgets)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A widget that splits datetime input into two <input type="hidden"> inputs.
"""
template_name = 'django/forms/widgets/splithiddendatetime.html'
def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None):
super().__init__(attrs, date_format, time_format, date_attrs, time_attrs)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
template_name = 'django/forms/widgets/select_date.html'
input_type = 'select'
select_widget = Select
date_re = re.compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = (0, empty_label[0])
self.month_none_value = (0, empty_label[1])
self.day_none_value = (0, empty_label[2])
else:
if empty_label is not None:
self.none_value = (0, empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
date_context = {}
year_choices = [(i, i) for i in self.years]
if not self.is_required:
year_choices.insert(0, self.year_none_value)
year_attrs = context['widget']['attrs'].copy()
year_name = self.year_field % name
year_attrs['id'] = 'id_%s' % year_name
date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
name=year_name,
value=context['widget']['value']['year'],
attrs=year_attrs,
)
month_choices = list(self.months.items())
if not self.is_required:
month_choices.insert(0, self.month_none_value)
month_attrs = context['widget']['attrs'].copy()
month_name = self.month_field % name
month_attrs['id'] = 'id_%s' % month_name
date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
name=month_name,
value=context['widget']['value']['month'],
attrs=month_attrs,
)
day_choices = [(i, i) for i in range(1, 32)]
if not self.is_required:
day_choices.insert(0, self.day_none_value)
day_attrs = context['widget']['attrs'].copy()
day_name = self.day_field % name
day_attrs['id'] = 'id_%s' % day_name
        date_context['day'] = self.select_widget(attrs, choices=day_choices).get_context(
name=day_name,
value=context['widget']['value']['day'],
attrs=day_attrs,
)
subwidgets = []
for field in self._parse_date_fmt():
subwidgets.append(date_context[field]['widget'])
context['widget']['subwidgets'] = subwidgets
return context
def format_value(self, value):
"""
Return a dict containing the year, month, and day of the current value.
Use dict instead of a datetime to allow invalid dates such as February
31 to display correctly.
"""
year, month, day = None, None, None
if isinstance(value, (datetime.date, datetime.datetime)):
year, month, day = value.year, value.month, value.day
elif isinstance(value, str):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
d = datetime.datetime.strptime(value, input_format)
year, month, day = d.year, d.month, d.day
except ValueError:
pass
match = self.date_re.match(value)
if match:
year, month, day = [int(val) for val in match.groups()]
return {'year': year, 'month': month, 'day': day}
@staticmethod
def _parse_date_fmt():
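        # Illustrative note (added comment): for a DATE_FORMAT such as
        # 'N j, Y' this yields 'month', 'day', 'year', which is the order the
        # three <select> boxes are rendered in by get_context().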
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name)
def value_omitted_from_data(self, data, files, name):
return not any(
('{}_{}'.format(name, interval) in data)
for interval in ('year', 'month', 'day')
)
| bsd-3-clause |
dcramer/django-compositepks | django/db/models/fields/related.py | 13 | 42537 | from django.db import connection, transaction
from django.db.models import signals, get_model
from django.db.models.fields import AutoField, Field, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy, string_concat, ungettext, ugettext as _
from django.utils.functional import curry
from django.core import exceptions
from django import forms
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name, False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
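# Illustrative example (added comment): a string-based relation such as
#
#   class Choice(models.Model):
#       poll = models.ForeignKey("polls.Poll")
#
# reaches add_lazy_relation() with relation == "polls.Poll"; if Poll is not
# loaded yet, the operation is queued in pending_lookups and replayed by
# do_pending_lookups() below once Poll's class_prepared signal fires.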
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
def contribute_to_class(self, cls, name):
sup = super(RelatedField, self)
# Add an accessor to allow easy determination of the related query path for this field
self.related_query_name = curry(self._get_related_query_name, cls._meta)
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {'class': cls.__name__.lower()}
other = self.rel.to
if isinstance(other, basestring):
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, related)
def get_db_prep_lookup(self, lookup_type, value):
# If we are doing a lookup on a Related Field, we must be
# comparing object instances. The value should be the PK of value,
# not value itself.
def pk_trace(value):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v, field = value, None
try:
while True:
v, field = getattr(v, v._meta.pk.name), v._meta.pk
except AttributeError:
pass
if field:
if lookup_type in ('range', 'in'):
v = [v]
v = field.get_db_prep_lookup(lookup_type, v)
if isinstance(v, list):
v = v[0]
return v
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
return QueryWrapper(('(%s)' % sql), params)
        # FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt']:
return [pk_trace(value)]
if lookup_type in ('range', 'in'):
return [pk_trace(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError, "Related Field has invalid lookup: %s" % lookup_type
def _get_related_query_name(self, opts):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = '_%s_cache' % related.get_accessor_name()
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
rel_obj = self.related.model._default_manager.get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
# Set the value of the related field
setattr(value, self.related.field.rel.get_related_field().attname, instance)
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.field.name
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
# If the related manager indicates that it should be used for
# related fields, respect that.
rel_mgr = self.field.rel.to._default_manager
if getattr(rel_mgr, 'use_for_related_fields', False):
rel_obj = rel_mgr.get(**params)
else:
rel_obj = QuerySet(self.field.rel.to).get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
            raise AttributeError, "%s must be accessed via instance" % self.field.name
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
rel_field = self.related.field
rel_model = self.related.model
# Dynamically create a class that subclasses the related
# model's default manager.
superclass = self.related.model._default_manager.__class__
class RelatedManager(superclass):
def get_query_set(self):
return superclass.get_query_set(self).filter(**(self.core_filters))
def add(self, *objs):
for obj in objs:
setattr(obj, rel_field.name, instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update({rel_field.name: instance})
return super(RelatedManager, self).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs.update({rel_field.name: instance})
return super(RelatedManager, self).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs):
val = getattr(instance, rel_field.rel.get_related_field().attname)
for obj in objs:
# Is obj actually part of this descriptor set?
if getattr(obj, rel_field.attname) == val:
setattr(obj, rel_field.name, None)
obj.save()
else:
raise rel_field.rel.to.DoesNotExist, "%r is not related to %r." % (obj, instance)
remove.alters_data = True
def clear(self):
for obj in self.all():
setattr(obj, rel_field.name, None)
obj.save()
clear.alters_data = True
manager = RelatedManager()
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
def create_many_related_manager(superclass, through=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
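    # Usage note (added comment): the descriptor classes below build their
    # manager class this way, e.g.
    #   RelatedManager = create_many_related_manager(superclass, rel.through)
    # where superclass is the related model's default manager class.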
class ManyRelatedManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_col_name=None, target_col_name=None):
super(ManyRelatedManager, self).__init__()
self.core_filters = core_filters
self.model = model
self.symmetrical = symmetrical
self.instance = instance
self.join_table = join_table
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.through = through
self._pk_val = self.instance._get_pk_val()
if self._pk_val is None:
raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
def get_query_set(self):
return superclass.get_query_set(self)._next_is_sticky().filter(**(self.core_filters))
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if through is None:
def add(self, *objs):
self._add_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_col_name, self.source_col_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
if self.symmetrical:
self._remove_items(self.target_col_name, self.source_col_name, *objs)
remove.alters_data = True
def clear(self):
self._clear_items(self.source_col_name)
# If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
if self.symmetrical:
self._clear_items(self.target_col_name)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if through is not None:
raise AttributeError, "Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
new_obj = super(ManyRelatedManager, self).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
obj, created = \
super(ManyRelatedManager, self).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_col_name, target_col_name, *objs):
# join_table: name of the m2m link table
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
new_ids.add(obj._get_pk_val())
else:
new_ids.add(obj)
# Add the newly created or already existing objects to the join table.
# First find out which items are already added, to avoid adding them twice
cursor = connection.cursor()
cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(target_col_name, self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(new_ids))),
[self._pk_val] + list(new_ids))
existing_ids = set([row[0] for row in cursor.fetchall()])
# Add the ones that aren't there already
for obj_id in (new_ids - existing_ids):
cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
(self.join_table, source_col_name, target_col_name),
[self._pk_val, obj_id])
transaction.commit_unless_managed()
def _remove_items(self, source_col_name, target_col_name, *objs):
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to remove
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
old_ids.add(obj._get_pk_val())
else:
old_ids.add(obj)
# Remove the specified objects from the join table
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(old_ids))),
[self._pk_val] + list(old_ids))
transaction.commit_unless_managed()
def _clear_items(self, source_col_name):
# source_col_name: the PK colname in join_table for the source object
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s" % \
(self.join_table, source_col_name),
[self._pk_val])
transaction.commit_unless_managed()
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model = self.related.model
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.related.field.rel.through)
qn = connection.ops.quote_name
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
instance=instance,
symmetrical=False,
join_table=qn(self.related.field.m2m_db_table()),
source_col_name=qn(self.related.field.m2m_reverse_name()),
target_col_name=qn(self.related.field.m2m_column_name())
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
through = getattr(self.related.field.rel, 'through', None)
if through is not None:
raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model=self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.field.rel.through)
qn = connection.ops.quote_name
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
instance=instance,
symmetrical=(self.field.rel.symmetrical and instance.__class__ == rel_model),
join_table=qn(self.field.m2m_db_table()),
source_col_name=qn(self.field.m2m_column_name()),
target_col_name=qn(self.field.m2m_reverse_name())
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
through = getattr(self.field.rel, 'through', None)
if through is not None:
raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
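# ---------------------------------------------------------------------------
# Editorial note (not part of the original module): a minimal sketch of how
# the two descriptors above surface in user code, using the hypothetical
# Article/Publication models mentioned in their comments. Shown as comments
# because model classes cannot be defined at import time inside this module.
#
#     from django.db import models
#
#     class Publication(models.Model):
#         title = models.CharField(max_length=100)
#
#     class Article(models.Model):
#         headline = models.CharField(max_length=100)
#         publications = models.ManyToManyField(Publication)
#
#     article.publications       # ReverseManyRelatedObjectsDescriptor manager
#     publication.article_set    # ManyRelatedObjectsDescriptor manager
#     article.publications.add(publication)  # writes a row to the join table
# ---------------------------------------------------------------------------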
class ManyToOneRel(object):
def __init__(self, to, field_name, related_name=None,
limit_choices_to=None, lookup_overrides=None, parent_link=False):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.lookup_overrides = lookup_overrides or {}
self.multiple = True
self.parent_link = parent_link
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, related_name=None,
limit_choices_to=None, lookup_overrides=None, parent_link=False):
super(OneToOneRel, self).__init__(to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
lookup_overrides=lookup_overrides, parent_link=parent_link)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None):
self.to = to
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = rel_class(to, to_field,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
parent_link=kwargs.pop('parent_link', False))
Field.__init__(self, **kwargs)
self.db_index = True
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.rel.get_related_field().attname)
return field_default
def get_db_prep_save(self, value):
if value == '' or value == None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_unicode(choice_list[1][0])
return Field.value_to_string(self, obj)
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "o2m")
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
def formfield(self, **kwargs):
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.complex_filter(
self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields, however, the only
        # thing we can do is make AutoField an IntegerField.
rel_field = self.rel.get_related_field()
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type()
return rel_field.db_type()
class OneToOneField(ForeignKey):
"""
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(),
SingleRelatedObjectDescriptor(related))
if not cls._meta.one_to_one_field:
cls._meta.one_to_one_field = self
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
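# ---------------------------------------------------------------------------
# Editorial note (not part of the original module): a minimal sketch, using
# hypothetical models, of the behaviour described in the OneToOneField
# docstring above.
#
#     class Place(models.Model):
#         name = models.CharField(max_length=50)
#
#     class Restaurant(models.Model):
#         place = models.OneToOneField(Place)
#
#     place.restaurant   # returns the single related Restaurant instance
#     # with a plain ForeignKey this would instead be place.restaurant_set.all()
# ---------------------------------------------------------------------------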
class ManyToManyField(RelatedField, Field):
def __init__(self, to, **kwargs):
try:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', True),
through=kwargs.pop('through', None))
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
self.creates_table = False
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
else:
self.creates_table = True
Field.__init__(self, **kwargs)
msg = ugettext_lazy('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through_model._meta.db_table
elif self.db_table:
return self.db_table
else:
return '%s_%s' % (opts.db_table, self.name)
def _get_m2m_column_name(self, related):
"Function that can be curried to provide the source column name for the m2m table"
try:
return self._m2m_column_name_cache
        except AttributeError:
if self.rel.through is not None:
for f in self.rel.through_model._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
self._m2m_column_name_cache = f.column
break
# If this is an m2m relation to self, avoid the inevitable name clash
elif related.model == related.parent_model:
self._m2m_column_name_cache = 'from_' + related.model._meta.object_name.lower() + '_id'
else:
self._m2m_column_name_cache = related.model._meta.object_name.lower() + '_id'
# Return the newly cached value
return self._m2m_column_name_cache
def _get_m2m_reverse_name(self, related):
"Function that can be curried to provide the related column name for the m2m table"
try:
return self._m2m_reverse_name_cache
        except AttributeError:
if self.rel.through is not None:
found = False
for f in self.rel.through_model._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
if related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
self._m2m_reverse_name_cache = f.column
break
else:
found = True
else:
self._m2m_reverse_name_cache = f.column
break
# If this is an m2m relation to self, avoid the inevitable name clash
elif related.model == related.parent_model:
self._m2m_reverse_name_cache = 'to_' + related.parent_model._meta.object_name.lower() + '_id'
else:
self._m2m_reverse_name_cache = related.parent_model._meta.object_name.lower() + '_id'
# Return the newly cached value
return self._m2m_reverse_name_cache
def isValidIDList(self, field_data, all_data):
"Validates that the value is a valid list of foreign keys"
mod = self.rel.to
try:
pks = map(int, field_data.split(','))
except ValueError:
# the CommaSeparatedIntegerField validator will catch this error
return
objects = mod._default_manager.in_bulk(pks)
if len(objects) != len(pks):
badkeys = [k for k in pks if k not in objects]
raise exceptions.ValidationError(
ungettext("Please enter valid %(self)s IDs. The value %(value)r is invalid.",
"Please enter valid %(self)s IDs. The values %(value)r are invalid.",
len(badkeys)) % {
'self': self.verbose_name,
'value': len(badkeys) == 1 and badkeys[0] or tuple(badkeys),
})
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_unicode(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and self.rel.to == "self" and self.rel.related_name is None:
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, basestring):
def resolve_through_model(field, model, cls):
field.rel.through_model = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
elif self.rel.through:
self.rel.through_model = self.rel.through
self.rel.through = self.rel.through._meta.object_name
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "m2m")
def contribute_to_related_class(self, cls, related):
# m2m relations to self do not have a ManyRelatedObjectsDescriptor,
# as it would be redundant - unless the field is non-symmetrical.
if related.model != related.parent_model or not self.rel.symmetrical:
# Add the descriptor for the m2m relation
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_column_name, related)
self.m2m_reverse_name = curry(self._get_m2m_reverse_name, related)
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ModelMultipleChoiceField, 'queryset': self.rel.to._default_manager.complex_filter(self.rel.limit_choices_to)}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
defaults['initial'] = [i._get_pk_val() for i in defaults['initial']]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
| bsd-3-clause |
Agana/MyBlogAgain | django/utils/synch.py | 376 | 2549 | """
Synchronization primitives:
- reader-writer lock (preference to writers)
(Contributed to Django by [email protected])
"""
try:
import threading
except ImportError:
import dummy_threading as threading
class RWLock:
"""
Classic implementation of reader-writer lock with preference to writers.
Readers can access a resource simultaneously.
Writers get an exclusive access.
API is self-descriptive:
reader_enters()
reader_leaves()
writer_enters()
writer_leaves()
"""
def __init__(self):
self.mutex = threading.RLock()
self.can_read = threading.Semaphore(0)
self.can_write = threading.Semaphore(0)
self.active_readers = 0
self.active_writers = 0
self.waiting_readers = 0
self.waiting_writers = 0
def reader_enters(self):
self.mutex.acquire()
try:
if self.active_writers == 0 and self.waiting_writers == 0:
self.active_readers += 1
self.can_read.release()
else:
self.waiting_readers += 1
finally:
self.mutex.release()
self.can_read.acquire()
def reader_leaves(self):
self.mutex.acquire()
try:
self.active_readers -= 1
if self.active_readers == 0 and self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
finally:
self.mutex.release()
def writer_enters(self):
self.mutex.acquire()
try:
if self.active_writers == 0 and self.waiting_writers == 0 and self.active_readers == 0:
self.active_writers += 1
self.can_write.release()
else:
self.waiting_writers += 1
finally:
self.mutex.release()
self.can_write.acquire()
def writer_leaves(self):
self.mutex.acquire()
try:
self.active_writers -= 1
if self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
elif self.waiting_readers != 0:
t = self.waiting_readers
self.waiting_readers = 0
self.active_readers += t
while t > 0:
self.can_read.release()
t -= 1
finally:
self.mutex.release()
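# ---------------------------------------------------------------------------
# Editorial note (not part of the original module): a minimal, self-contained
# usage sketch of the enter/leave pairing described in the RWLock docstring.
# The shared `counter` list and the worker functions are hypothetical names
# used only for illustration.
if __name__ == '__main__':
    _demo_lock = RWLock()
    counter = [0]

    def read_counter():
        _demo_lock.reader_enters()
        try:
            return counter[0]  # many readers may hold the lock at once
        finally:
            _demo_lock.reader_leaves()

    def bump_counter():
        _demo_lock.writer_enters()
        try:
            counter[0] += 1  # writers get exclusive access
        finally:
            _demo_lock.writer_leaves()

    workers = [threading.Thread(target=bump_counter) for _ in range(3)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    assert read_counter() == 3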
| bsd-3-clause |
fernandezcuesta/ansible | contrib/inventory/openstack.py | 64 | 8980 | #!/usr/bin/env python
# Copyright (c) 2012, Marco Vito Moscaritolo <[email protected]>
# Copyright (c) 2013, Jesse Keating <[email protected]>
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# The OpenStack Inventory module uses os-client-config for configuration.
# https://github.com/openstack/os-client-config
# This means it will either:
# - Respect normal OS_* environment variables like other OpenStack tools
# - Read values from a clouds.yaml file.
# If you want to configure via clouds.yaml, you can put the file in:
# - Current directory
# - ~/.config/openstack/clouds.yaml
# - /etc/openstack/clouds.yaml
# - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will connect to
# all of them and present them as one contiguous inventory.
#
# See the adjacent openstack.yml file for an example config file
# There are two ansible inventory specific options that can be set in
# the inventory section.
# expand_hostvars controls whether or not the inventory will make extra API
# calls to fill out additional information about each server
# use_hostnames registers hosts by their hostname instead of their UUID;
#                a host falls back to being registered by UUID (with a group
#                named after its hostname) only when more than one server
#                shares that hostname
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
# has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True)
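#
# Editorial note (not part of the original file): a minimal illustrative
# clouds.yaml. The cloud name, credentials and values below are hypothetical;
# see the os-client-config documentation for the authoritative format.
#
# clouds:
#   mycloud:
#     auth:
#       auth_url: https://identity.example.com:5000/v2.0
#       username: demo
#       password: secret
#       project_name: demo
#     region_name: RegionOne
# ansible:
#   use_hostnames: True
#   expand_hostvars: False
#   fail_on_errors: True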
import argparse
import collections
import os
import sys
import time
from distutils.version import StrictVersion
try:
import json
except ImportError:
import simplejson as json
import os_client_config
import shade
import shade.inventory
CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
def get_groups_from_server(server_vars, namegroup=True):
groups = []
region = server_vars['region']
cloud = server_vars['cloud']
metadata = server_vars.get('metadata', {})
# Create a group for the cloud
groups.append(cloud)
# Create a group on region
groups.append(region)
# And one by cloud_region
groups.append("%s_%s" % (cloud, region))
# Check if group metadata key in servers' metadata
if 'group' in metadata:
groups.append(metadata['group'])
for extra_group in metadata.get('groups', '').split(','):
if extra_group:
groups.append(extra_group.strip())
groups.append('instance-%s' % server_vars['id'])
if namegroup:
groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(metadata.items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud, region, az))
return groups
def get_host_groups(inventory, refresh=False):
(cache_file, cache_expiration_time) = get_cache_settings()
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
open(cache_file, 'w').write(groups)
else:
groups = open(cache_file, 'r').read()
return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'],
ansible_host=server['interface_ip'],
openstack=server)
for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key)
def get_host_groups_from_cloud(inventory):
groups = collections.defaultdict(list)
firstpass = collections.defaultdict(list)
hostvars = {}
list_args = {}
if hasattr(inventory, 'extra_config'):
use_hostnames = inventory.extra_config['use_hostnames']
list_args['expand'] = inventory.extra_config['expand_hostvars']
if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
list_args['fail_on_cloud_config'] = \
inventory.extra_config['fail_on_errors']
else:
use_hostnames = False
for server in inventory.list_hosts(**list_args):
if 'interface_ip' not in server:
continue
firstpass[server['name']].append(server)
for name, servers in firstpass.items():
if len(servers) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
server_ids = set()
# Trap for duplicate results
for server in servers:
server_ids.add(server['id'])
if len(server_ids) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
for server in servers:
append_hostvars(
hostvars, groups, server['id'], server,
namegroup=True)
groups['_meta'] = {'hostvars': hostvars}
return groups
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
''' Determines if cache file has expired, or if it is still valid '''
if refresh:
return True
if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
if (mod_time + cache_expiration_time) > current_time:
return False
return True
def get_cache_settings():
config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
return (cache_file, cache_expiration_time)
def to_json(in_dict):
return json.dumps(in_dict, sort_keys=True, indent=2)
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--private',
action='store_true',
help='Use private address for ansible host')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
try:
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
shade.simple_logging(debug=args.debug)
inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
)
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
config_key='ansible',
config_defaults={
'use_hostnames': False,
'expand_hostvars': True,
'fail_on_errors': True,
}
))
inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list:
output = get_host_groups(inventory, refresh=args.refresh)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
except shade.OpenStackCloudException as e:
sys.stderr.write('%s\n' % e.message)
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 |
mogoweb/chromium-crosswalk | chrome/test/mini_installer/file_verifier.py | 125 | 1116 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import verifier
class FileVerifier(verifier.Verifier):
"""Verifies that the current files match the expectation dictionaries."""
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Overridden from verifier.Verifier.
This method will throw an AssertionError if file state doesn't match the
|expectation|.
Args:
expectation_name: Path to the file being verified. It is expanded using
Expand.
expectation: A dictionary with the following key and value:
'exists' a boolean indicating whether the file should exist.
variable_expander: A VariableExpander object.
"""
file_path = variable_expander.Expand(expectation_name)
file_exists = os.path.exists(file_path)
assert expectation['exists'] == file_exists, \
('File %s exists' % file_path) if file_exists else \
('File %s is missing' % file_path)
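# Editorial note (not part of the original file): a minimal illustration of
# the expectation mapping described in the docstring above. The path and the
# $PROGRAM_FILES variable are hypothetical examples that the test framework's
# VariableExpander would expand before the check runs.
#
#     {'$PROGRAM_FILES\\Chrome\\Application\\chrome.exe': {'exists': True}}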
| bsd-3-clause |
AmesianX/amoco | amoco/arch/arm/v8/asm64.py | 6 | 15185 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2013 Axel Tillequin ([email protected])
# published under GPLv2 license
from amoco.logger import Log
logger = Log(__name__)
from .env64 import *
from .utils import *
from amoco.cas.utils import *
def i_ADC(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = AddWithCarry(op1, op2, fmap(C))
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_SBC(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = SubWithBorrow(op1, op2, fmap(C))
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_ADD(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = AddWithCarry(op1,op2)
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_SUB(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = map(fmap,i.operands[1:])
x,carry,overflow = SubWithBorrow(op1,op2)
if i.setflags:
fmap[N] = x<0
fmap[Z] = x==0
fmap[C] = carry
fmap[V] = overflow
fmap[i.d] = x
def i_ADR(i,fmap):
fmap[pc] = fmap[pc]+i.length
base = fmap(pc)
fmap[i.d] = base+i.imm
def i_ADRP(i,fmap):
fmap[pc] = fmap[pc]+i.length
base = fmap(pc)
base[0:12]=cst(0,12)
fmap[i.d] = base+i.imm
def i_AND(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 & src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_ORR(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 | src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_ORN(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 | ~src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_EOR(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 ^ src2)
fmap[dst] = fmap(x)
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_EON(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst,src1,src2 = i.operands
x = fmap(src1 ^ ~src2)
fmap[dst] = x
if i.setflags:
fmap[N] = x[x.size-1:x.size]
fmap[Z] = x==0
fmap[C] = bit0
fmap[V] = bit0
def i_ASRV(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = fmap(i.n>>i.m)
def i_Bcond(i,fmap):
cond = fmap(i.cond)
fmap[pc] = tst(cond, fmap[pc]+i.offset, fmap[pc]+i.length)
def i_B(i,fmap):
fmap[pc] = fmap[pc]+i.offset
def i_BL(i,fmap):
fmap[r30] = fmap[pc]+i.length
fmap[pc] = fmap[pc]+i.offset
def i_BR(i,fmap):
fmap[pc] = fmap(i.n)
def i_BLR(i,fmap):
fmap[r30] = fmap[pc]+i.length
fmap[pc] = fmap(i.n)
def i_BFM(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst = cst(0,i.datasize) if i.inzero else fmap(i.d)
src = fmap(i.n)
lo = (dst & ~i.wmask) | (ROR(src,i.immr.value) & i.wmask)
sta,sto = i.imms.value,i.imms.value+1
hi = composer([src[sta:sto]]*i.datasize) if i.extend else dst
fmap[i.d] = (hi & ~i.tmask) | (lo & i.tmask)
i_SBFM = i_BFM
i_UBFM = i_BFM
def i_BIC(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1 = fmap(i.n)
op2 = fmap(i.m)
if i.invert: op2 = ~op2
_r = op1 & op2
fmap[i.d] = _r
if i.setflags:
fmap[C] = bit0
fmap[V] = bit0
fmap[Z] = _r==0
fmap[N] = _r[_r.size-1:_r.size]
def i_BRK(i,fmap):
fmap[pc] = fmap[pc]+i.length
ext('BRK %s'%i.imm,size=pc.size).call(fmap)
def i_CBNZ(i,fmap):
fmap[pc] = tst(fmap(i.t!=0), fmap[pc]+i.offset, fmap[pc]+i.length)
def i_CBZ(i,fmap):
fmap[pc] = tst(fmap(i.t==0), fmap[pc]+i.offset, fmap[pc]+i.length)
def i_CCMN(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1, op2, nzcv, cond = i.operands
_r, carry, overflow = AddWithCarry(fmap(op1),fmap(op2))
fmap[N] = tst(fmap(cond), _r<0 , i.flags[0])
fmap[Z] = tst(fmap(cond), _r==0 , i.flags[1])
fmap[C] = tst(fmap(cond), carry , i.flags[2])
fmap[V] = tst(fmap(cond), overflow, i.flags[3])
def i_CCMP(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1, op2, nzcv, cond = i.operands
_r, carry, overflow = SubWithBorrow(fmap(op1),fmap(op2))
fmap[N] = tst(fmap(cond), _r<0 , i.flags[0])
fmap[Z] = tst(fmap(cond), _r==0 , i.flags[1])
fmap[C] = tst(fmap(cond), carry , i.flags[2])
fmap[V] = tst(fmap(cond), overflow, i.flags[3])
def i_CLREX(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_CLS(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.d.size)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_CLZ(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.d.size)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_CSEL(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(op2))
def i_CSINC(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(op2)+1)
def i_CSINV(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(~op2))
def i_CSNEG(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, cond = i.operands
fmap[dst] = tst(fmap(cond), fmap(op1), fmap(-op2))
def i_DCPS1(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DCPS2(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DCPS3(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DMB(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DRPS(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_DSB(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_ISB(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_ERET(i,fmap):
fmap[pc] = top(64)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_EXTR(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2, lsb = i.operands
concat = composer(fmap(op2),fmap(op1))
result = concat[lsb:lsb+i.datasize]
fmap[dst] = result
def i_HINT(i,fmap):
fmap[pc] = fmap[pc]+i.length
if i.imm>0:
logger.warning('semantic undefined for %s(%d)'%(i.mnemonic,i.imm))
def i_HLT(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_HVC(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_LDAR(i,fmap):
fmap[pc] = fmap[pc]+i.length
data = fmap(mem(i.n,i.datasize))
if i.pair:
if not i.excl: raise InstructionError(i)
if i.elsize==32:
if internals['endianstate']==0:
fmap[i.t] = data[0:i.elsize]
fmap[i.t2] = data[i.elsize:i.datasize]
else:
fmap[i.t] = data[i.elsize:i.datasize]
fmap[i.t2] = data[0:i.elsize]
else:
fmap[i.t] = fmap(mem(i.n, 64))
fmap[i.t2] = fmap(mem(i.n, 64, disp=8))
else:
fmap[i.t] = data.zeroextend(i.regsize)
i_LDARB = i_LDAR
i_LDARH = i_LDAR
i_LDAXP = i_LDAR
i_LDAXR = i_LDAR
i_LDAXRB = i_LDAR
i_LDAXRH = i_LDAR
i_LDXP = i_LDAR
i_LDXR = i_LDAR
i_LDXRB = i_LDAR
i_LDXRH = i_LDAR
def i_STLR(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = fmap(i.n)
if i.pair:
if not i.excl: raise InstructionError(i)
if internals['endianstate']==0:
data = composer(i.t,i.t2)
else:
data = composer(i.t2,i.t)
else:
data = i.t
if i.excl:
fmap[i.s] = cst(1,32)
fmap[address] = fmap(data)
i_STLRB = i_STLR
i_STLRH = i_STLR
i_STLXP = i_STLR
i_STLXR = i_STLR
i_STLXRB = i_STLR
i_STLXRH = i_STLR
i_STXP = i_STLR
i_STXR = i_STLR
i_STXRB = i_STLR
i_STXRH = i_STLR
def i_LDP(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = i.n
if not i.postindex: address += i.offset
data1 = mem(address,i.datasize)
data2 = mem(address,i.datasize, disp=i.datasize/8)
fmap[i.t] = fmap(data1)
fmap[i.t2] = fmap(data2)
if i.wback:
if i.postindex: address += i.offset
fmap[i.n] = fmap(address)
def i_STP(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = i.n
if not i.postindex: address += i.offset
data1 = fmap(i.t)
data2 = fmap(i.t2)
fmap[mem(address,i.datasize)] = data1
fmap[mem(address,i.datasize,disp=i.datasize/8)] = data2
if i.wback:
if i.postindex: address += i.offset
fmap[i.n] = fmap(address)
i_LDNP = i_LDP
i_STNP = i_STP
def i_LDPSW(i,fmap):
fmap[pc] = fmap[pc]+i.length
address = i.n
if not i.postindex: address += i.offset
data1 = mem(address,i.datasize)
data2 = mem(address,i.datasize, disp=i.datasize/8)
fmap[i.t] = fmap(data1).signextend(64)
fmap[i.t2] = fmap(data2).signextend(64)
if i.wback:
if i.postindex: address += i.offset
fmap[i.n] = fmap(address)
def i_LDR(i,fmap):
if len(i.operands)==3:
fmap[pc] = fmap[pc]+i.length
Xt, Xn, offset = i.operands
address = Xn
if not i.postindex: address += offset
data = mem(address,i.datasize)
if i.signed:
fmap[Xt] = data.signextend(i.regsize)
else:
fmap[Xt] = data.zeroextend(i.regsize)
if i.wback:
if i.postindex: address += offset
fmap[Xn] = fmap(address)
else:# literal case:
Xt, offset = i.operands
address = fmap[pc] + offset
fmap[pc] = fmap[pc]+i.length
data = mem(address,i.size)
if i.signed:
fmap[Xt] = fmap(data.signextend(64))
else:
fmap[Xt] = fmap(data.zeroextend(64))
i_LDRB = i_LDR
i_LDRH = i_LDR
i_LDRSB = i_LDR
i_LDRSH = i_LDR
i_LDRSW = i_LDR
i_LDTR = i_LDR
i_LDTRB = i_LDR
i_LDTRH = i_LDR
i_LDTRSB = i_LDR
i_LDTRSH = i_LDR
i_LDTRSW = i_LDR
i_LDUR = i_LDR
i_LDURB = i_LDR
i_LDURH = i_LDR
i_LDURSB = i_LDR
i_LDURSH = i_LDR
i_LDURSW = i_LDR
def i_LSLV(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2 = i.operands
op1.sf=False
fmap[dst] = fmap(op1<<op2)
def i_LSRV(i,fmap):
fmap[pc] = fmap[pc]+i.length
dst, op1, op2 = i.operands
op1.sf=False
fmap[dst] = fmap(op1>>op2)
def i_MADD(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = fmap(i.a + i.r*i.m)
def i_MSUB(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = fmap(i.a - i.r*i.m)
def i_MOVK(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = fmap(i.d)
result[0:16] = i.imm
fmap[i.d] = result
def i_MOVZ(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = cst(0,i.d.size)
result[0:16] = i.imm
fmap[i.d] = result
def i_MOVN(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = cst(0,i.d.size)
result[0:16] = i.imm
fmap[i.d] = ~result
def i_MRS(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_MSR(i,fmap):
fmap[pc] = fmap[pc]+i.length
pstatefield, op2 = i.operands
fmap[pstatefield] = op2[0:pstatefield.size]
def i_PRFM(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_RBIT(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_REV16(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_REV32(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_REV(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = top(i.datasize)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_RET(i,fmap):
fmap[pc] = fmap(i.n)
def i_RORV(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.d] = ROR(fmap(i.n),fmap(i.m))
def i_SDIV(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = fmap(i.n),fmap(i.m)
op1.sf = op2.sf = True
fmap[i.d] = op1/op2
def i_UDIV(i,fmap):
fmap[pc] = fmap[pc]+i.length
op1,op2 = fmap(i.n),fmap(i.m)
op1.sf = op2.sf = False
fmap[i.d] = op1/op2
def i_SMADDL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a + (i.n**i.m))
_x.sf = True
fmap[i.d] = _x
def i_SMSUBL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a - (i.n**i.m))
_x.sf = True
fmap[i.d] = _x
def i_UMADDL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a + (i.n**i.m))
_x.sf = False
fmap[i.d] = _x
def i_UMSUBL(i,fmap):
fmap[pc] = fmap[pc]+i.length
_x = fmap(i.a - (i.n**i.m))
_x.sf = False
fmap[i.d] = _x
def i_SMULH(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = fmap(i.n**i.m)
result.sf = True
fmap[i.d] = result[64:128]
def i_UMULH(i,fmap):
fmap[pc] = fmap[pc]+i.length
result = fmap(i.n**i.m)
result.sf = False
fmap[i.d] = result[64:128]
def i_STR(i,fmap):
if len(i.operands)==3:
fmap[pc] = fmap[pc]+i.length
Xt, Xn, offset = i.operands
address = Xn
if not i.postindex: address += offset
dst = mem(address,i.datasize)
data = fmap(Xt)
fmap[dst] = data[0:i.datasize]
if i.wback:
if i.postindex: address += offset
fmap[Xn] = fmap(address)
i_STRB = i_STR
i_STRH = i_STR
i_STTR = i_STR
i_STTRB = i_STR
i_STTRH = i_STR
i_STUR = i_STR
i_STURB = i_STR
i_STURH = i_STR
def i_SMC(i,fmap):
fmap[pc] = fmap[pc]+i.length
ext('EXCEPTION.EL3 %s'%i.imm,size=pc.size).call(fmap)
def i_SVC(i,fmap):
fmap[pc] = fmap[pc]+i.length
ext('EXCEPTION.EL1 %s'%i.imm,size=pc.size).call(fmap)
def i_SYS(i,fmap):
fmap[pc] = fmap[pc]+i.length
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_SYSL(i,fmap):
fmap[pc] = fmap[pc]+i.length
fmap[i.t] = top(i.t.size)
logger.warning('semantic undefined for %s'%i.mnemonic)
def i_TBNZ(i,fmap):
op = fmap(i.t)
fmap[pc] = tst(op[i.bitpos:i.bitpos+1]==1, fmap[pc]+i.offset, fmap[pc]+i.length)
def i_TBZ(i,fmap):
op = fmap(i.t)
fmap[pc] = tst(op[i.bitpos:i.bitpos+1]==0, fmap[pc]+i.offset, fmap[pc]+i.length)
| gpl-2.0 |
akirk/youtube-dl | youtube_dl/extractor/folketinget.py | 92 | 2651 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_parse_qs
from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
xpath_text,
)
class FolketingetIE(InfoExtractor):
IE_DESC = 'Folketinget (ft.dk; Danish parliament)'
_VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx'
_TEST = {
'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player',
'md5': '6269e8626fa1a891bf5369b386ae996a',
'info_dict': {
'id': '1165642',
'ext': 'mp4',
'title': 'Åbent samråd i Erhvervsudvalget',
'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet',
'view_count': int,
'width': 768,
'height': 432,
'tbr': 928000,
'timestamp': 1416493800,
'upload_date': '20141120',
'duration': 3960,
},
'params': {
# rtmp download
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._html_search_regex(
r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<',
webpage, 'description', fatal=False)
player_params = compat_parse_qs(self._search_regex(
r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"',
webpage, 'player params'))
xml_url = player_params['xml'][0]
doc = self._download_xml(xml_url, video_id)
timestamp = parse_iso8601(xpath_text(doc, './/date'))
duration = parse_duration(xpath_text(doc, './/duration'))
width = int_or_none(xpath_text(doc, './/width'))
height = int_or_none(xpath_text(doc, './/height'))
view_count = int_or_none(xpath_text(doc, './/views'))
formats = [{
'format_id': n.attrib['bitrate'],
'url': xpath_text(n, './url', fatal=True),
'tbr': int_or_none(n.attrib['bitrate']),
} for n in doc.findall('.//streams/stream')]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'timestamp': timestamp,
'width': width,
'height': height,
'duration': duration,
'view_count': view_count,
}
| unlicense |
leemac/JellyfishRss | rss/migrations/0001_initial.py | 1 | 8815 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Folder'
db.create_table(u'rss_folder', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('title', self.gf('django.db.models.fields.TextField')()),
('color', self.gf('django.db.models.fields.TextField')(max_length=20, blank=True)),
))
db.send_create_signal(u'rss', ['Folder'])
# Adding model 'Subscription'
db.create_table(u'rss_subscription', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_crawled', self.gf('django.db.models.fields.CharField')(max_length=200)),
('url', self.gf('django.db.models.fields.TextField')()),
('site_url', self.gf('django.db.models.fields.TextField')()),
('title', self.gf('django.db.models.fields.TextField')()),
('favicon_url', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'rss', ['Subscription'])
# Adding model 'SubscriptionUserRelation'
db.create_table(u'rss_subscriptionuserrelation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('subscription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rss.Subscription'])),
('folder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rss.Folder'])),
))
db.send_create_signal(u'rss', ['SubscriptionUserRelation'])
# Adding model 'SubscriptionItem'
db.create_table(u'rss_subscriptionitem', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content', self.gf('django.db.models.fields.TextField')()),
('published', self.gf('django.db.models.fields.DateTimeField')()),
('title', self.gf('django.db.models.fields.TextField')()),
('url', self.gf('django.db.models.fields.TextField')()),
('is_read', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_favorite', self.gf('django.db.models.fields.BooleanField')(default=False)),
('subscription', self.gf('django.db.models.fields.related.ForeignKey')(related_name='item', to=orm['rss.Subscription'])),
))
db.send_create_signal(u'rss', ['SubscriptionItem'])
def backwards(self, orm):
# Deleting model 'Folder'
db.delete_table(u'rss_folder')
# Deleting model 'Subscription'
db.delete_table(u'rss_subscription')
# Deleting model 'SubscriptionUserRelation'
db.delete_table(u'rss_subscriptionuserrelation')
# Deleting model 'SubscriptionItem'
db.delete_table(u'rss_subscriptionitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rss.folder': {
'Meta': {'object_name': 'Folder'},
'color': ('django.db.models.fields.TextField', [], {'max_length': '20', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'rss.subscription': {
'Meta': {'object_name': 'Subscription'},
'color': ('django.db.models.fields.TextField', [], {'max_length': '20', 'blank': 'True'}),
'favicon_url': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_crawled': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site_url': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
u'rss.subscriptionitem': {
'Meta': {'object_name': 'SubscriptionItem'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published': ('django.db.models.fields.DateTimeField', [], {}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'item'", 'to': u"orm['rss.Subscription']"}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
u'rss.subscriptionuserrelation': {
'Meta': {'object_name': 'SubscriptionUserRelation'},
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rss.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rss.Subscription']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['rss'] | mit |
evilpie/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treebuilders/dom.py | 920 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(object):
def __init__(self, element):
self.element = element
def __iter__(self):
return list(self.element.attributes.items()).__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def __len__(self):
return len(list(self.element.attributes.items()))
def items(self):
return [(item[0], item[1]) for item in
list(self.element.attributes.items())]
def keys(self):
return list(self.element.attributes.keys())
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data = data
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
implementation = DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI is not None):
name = "%s %s" % (constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (' ' * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
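# Editorial note (not part of the original module): a minimal usage sketch.
# The DOM builder defined above is normally reached through html5lib's
# top-level API rather than imported directly, e.g. (assuming the public
# html5lib.parse() helper):
#
#     import html5lib
#     document = html5lib.parse("<p>Hello world", treebuilder="dom")
#     # `document` is an xml.dom.minidom Document produced by this builder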
| mpl-2.0 |
guarddogofww/cs108test | src/jarabe/model/shell.py | 3 | 28033 | # Copyright (C) 2006-2007 Owen Williams.
# Copyright (C) 2006-2008 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
from gi.repository import Gio
from gi.repository import Wnck
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkX11
from gi.repository import GLib
import dbus
from sugar3 import dispatch
from sugar3 import profile
from gi.repository import SugarExt
from jarabe.model.bundleregistry import get_registry
_SERVICE_NAME = 'org.laptop.Activity'
_SERVICE_PATH = '/org/laptop/Activity'
_SERVICE_INTERFACE = 'org.laptop.Activity'
_model = None
class Activity(GObject.GObject):
"""Activity which appears in the "Home View" of the Sugar shell
This class stores the Sugar Shell's metadata regarding a
given activity/application in the system. It interacts with
the sugar3.activity.* modules extensively in order to
accomplish its tasks.
"""
__gtype_name__ = 'SugarHomeActivity'
__gsignals__ = {
'pause': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'resume': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'stop': (GObject.SignalFlags.RUN_LAST, GObject.TYPE_BOOLEAN, ([])),
}
LAUNCHING = 0
LAUNCH_FAILED = 1
LAUNCHED = 2
def __init__(self, activity_info, activity_id, color, window=None):
"""Initialise the HomeActivity
activity_info -- sugar3.activity.registry.ActivityInfo instance,
provides the information required to actually
create the new instance. This is, in effect,
the "type" of activity being created.
activity_id -- unique identifier for this instance
of the activity type
        window -- optional WnckWindow to register for the activity; windows
                  are kept in a stack and the lowest one in the stack is
                  the main window.
"""
GObject.GObject.__init__(self)
self._windows = []
self._service = None
self._shell_windows = []
self._activity_id = activity_id
self._activity_info = activity_info
self._launch_time = time.time()
self._launch_status = Activity.LAUNCHING
if color is not None:
self._color = color
else:
self._color = profile.get_color()
if window is not None:
self.add_window(window)
self._retrieve_service()
self._name_owner_changed_handler = None
if not self._service:
bus = dbus.SessionBus()
self._name_owner_changed_handler = bus.add_signal_receiver(
self._name_owner_changed_cb,
signal_name='NameOwnerChanged',
dbus_interface='org.freedesktop.DBus')
self._launch_completed_hid = \
get_model().connect('launch-completed',
self.__launch_completed_cb)
self._launch_failed_hid = get_model().connect('launch-failed',
self.__launch_failed_cb)
def get_launch_status(self):
return self._launch_status
launch_status = GObject.property(getter=get_launch_status)
def add_window(self, window, is_main_window=False):
"""Add a window to the windows stack."""
if not window:
raise ValueError('window must be valid')
self._windows.append(window)
if is_main_window:
window.connect('state-changed', self._state_changed_cb)
def push_shell_window(self, window):
"""Attach a shell run window (eg. view source) to the activity."""
self._shell_windows.append(window)
def pop_shell_window(self, window):
"""
        Detach a shell run window (e.g. view source) from the activity.
Only call this on **user initiated** deletion (loop issue).
"""
self._shell_windows.remove(window)
def has_shell_window(self):
return bool(self._shell_windows)
def stop(self):
# For web activities the Apisocket will connect to the 'stop'
# signal, thus preventing the window close. Then, on the
# 'activity.close' method, it will call close_window()
# directly.
close_window = not self.emit('stop')
if close_window:
self.close_window()
def close_window(self):
if self.get_window() is not None:
self.get_window().close(GLib.get_current_time())
for w in self._shell_windows:
w.destroy()
def remove_window_by_xid(self, xid):
"""Remove a window from the windows stack."""
for wnd in self._windows:
if wnd.get_xid() == xid:
self._windows.remove(wnd)
return True
return False
def get_service(self):
"""Get the activity service
Note that non-native Sugar applications will not have
such a service, so the return value will be None in
those cases.
"""
return self._service
def get_title(self):
"""Retrieve the application's root window's suggested title"""
if self._windows:
return self._windows[0].get_name()
else:
return None
def get_icon_path(self):
"""Retrieve the activity's icon (file) name"""
if self.is_journal():
icon_theme = Gtk.IconTheme.get_default()
info = icon_theme.lookup_icon('activity-journal',
Gtk.IconSize.SMALL_TOOLBAR, 0)
if not info:
return None
fname = info.get_filename()
del info
return fname
elif self._activity_info:
return self._activity_info.get_icon()
else:
return None
def get_icon_color(self):
"""Retrieve the appropriate icon colour for this activity
Uses activity_id to index into the PresenceService's
        set of activity colours; if the PresenceService does not
        have an entry (implying that this is not a Sugar-shared application),
        the local user's profile colour is used for the icon.
"""
return self._color
def get_activity_id(self):
"""Retrieve the "activity_id" passed in to our constructor
This is a "globally likely unique" identifier generated by
sugar3.util.unique_id
"""
return self._activity_id
def get_bundle_id(self):
""" Returns the activity's bundle id"""
if self._activity_info is None:
return None
else:
return self._activity_info.get_bundle_id()
def get_xid(self):
"""Retrieve the X-windows ID of our root window"""
if self._windows:
return self._windows[0].get_xid()
else:
return None
def has_xid(self, xid):
"""Check if an X-window with the given xid is in the windows stack"""
if self._windows:
for wnd in self._windows:
if wnd.get_xid() == xid:
return True
return False
def get_window(self):
"""Retrieve the X-windows root window of this application
This was stored by the add_window method, which was
called by HomeModel._add_activity, which was called
via a callback that looks for all 'window-opened'
events.
We keep a stack of the windows. The lowest window in the
stack that is still valid we consider the main one.
HomeModel currently uses a dbus service query on the
activity to determine to which HomeActivity the newly
launched window belongs.
"""
if self._windows:
return self._windows[0]
return None
def get_type(self):
"""Retrieve the activity bundle id for future reference"""
if not self._windows:
return None
else:
return SugarExt.wm_get_bundle_id(self._windows[0].get_xid())
def is_journal(self):
"""Returns boolean if the activity is of type JournalActivity"""
return self.get_type() == 'org.laptop.JournalActivity'
def get_launch_time(self):
"""Return the time at which the activity was first launched
Format is floating-point time.time() value
(seconds since the epoch)
"""
return self._launch_time
def get_pid(self):
"""Returns the activity's PID"""
if not self._windows:
return None
return self._windows[0].get_pid()
def get_bundle_path(self):
"""Returns the activity's bundle directory"""
if self._activity_info is None:
return None
else:
return self._activity_info.get_path()
def get_activity_name(self):
"""Returns the activity's bundle name"""
if self._activity_info is None:
return None
else:
return self._activity_info.get_name()
def equals(self, activity):
if self._activity_id and activity.get_activity_id():
return self._activity_id == activity.get_activity_id()
if self._windows[0].get_xid() and activity.get_xid():
return self._windows[0].get_xid() == activity.get_xid()
return False
def _get_service_name(self):
if self._activity_id:
return _SERVICE_NAME + self._activity_id
else:
return None
def _retrieve_service(self):
if not self._activity_id:
return
try:
bus = dbus.SessionBus()
proxy = bus.get_object(self._get_service_name(),
_SERVICE_PATH + '/' + self._activity_id)
self._service = dbus.Interface(proxy, _SERVICE_INTERFACE)
except dbus.DBusException:
self._service = None
def _name_owner_changed_cb(self, name, old, new):
if name == self._get_service_name():
if old and not new:
logging.debug('Activity._name_owner_changed_cb: '
'activity %s went away', name)
self._name_owner_changed_handler.remove()
self._name_owner_changed_handler = None
self._service = None
elif not old and new:
logging.debug('Activity._name_owner_changed_cb: '
'activity %s started up', name)
self._retrieve_service()
self.set_active(True)
def set_active(self, state):
"""Propagate the current state to the activity object"""
if self._service is not None:
self._service.SetActive(state,
reply_handler=self._set_active_success,
error_handler=self._set_active_error)
def _set_active_success(self):
pass
def _set_active_error(self, err):
logging.error('set_active() failed: %s', err)
def _set_launch_status(self, value):
get_model().disconnect(self._launch_completed_hid)
get_model().disconnect(self._launch_failed_hid)
self._launch_completed_hid = None
self._launch_failed_hid = None
self._launch_status = value
self.notify('launch_status')
def __launch_completed_cb(self, model, home_activity):
if home_activity is self:
self._set_launch_status(Activity.LAUNCHED)
def __launch_failed_cb(self, model, home_activity):
if home_activity is self:
self._set_launch_status(Activity.LAUNCH_FAILED)
def _state_changed_cb(self, main_window, changed_mask, new_state):
if changed_mask & Wnck.WindowState.MINIMIZED:
if new_state & Wnck.WindowState.MINIMIZED:
self.emit('pause')
else:
self.emit('resume')
class ShellModel(GObject.GObject):
"""Model of the shell (activity management)
The ShellModel is basically the point of registration
for all running activities within Sugar. It traps
events that tell the system there is a new activity
being created (generated by the activity factories),
or removed, as well as those which tell us that the
currently focussed activity has changed.
The HomeModel tracks a set of HomeActivity instances,
which are tracking the window to activity mappings
the activity factories have set up.
"""
__gsignals__ = {
'activity-added': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'activity-removed': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'active-activity-changed': (GObject.SignalFlags.RUN_FIRST,
None,
([GObject.TYPE_PYOBJECT])),
'tabbing-activity-changed': (GObject.SignalFlags.RUN_FIRST,
None,
([GObject.TYPE_PYOBJECT])),
'launch-started': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'launch-completed': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'launch-failed': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
}
ZOOM_MESH = 0
ZOOM_GROUP = 1
ZOOM_HOME = 2
ZOOM_ACTIVITY = 3
def __init__(self):
GObject.GObject.__init__(self)
self._screen = Wnck.Screen.get_default()
self._screen.connect('window-opened', self._window_opened_cb)
self._screen.connect('window-closed', self._window_closed_cb)
self._screen.connect('active-window-changed',
self._active_window_changed_cb)
self.zoom_level_changed = dispatch.Signal()
self._desktop_level = self.ZOOM_HOME
self._zoom_level = self.ZOOM_HOME
self._current_activity = None
self._activities = []
self._shared_activities = {}
self._active_activity = None
self._tabbing_activity = None
self._launchers = {}
self._modal_dialogs_counter = 0
self._screen.toggle_showing_desktop(True)
settings = Gio.Settings('org.sugarlabs')
self._maximum_open_activities = settings.get_int(
'maximum-number-of-open-activities')
self._launch_timers = {}
def get_launcher(self, activity_id):
return self._launchers.get(str(activity_id))
def register_launcher(self, activity_id, launcher):
self._launchers[activity_id] = launcher
def unregister_launcher(self, activity_id):
if activity_id in self._launchers:
del self._launchers[activity_id]
def _update_zoom_level(self, window):
if window.get_window_type() == Wnck.WindowType.DIALOG:
return
elif window.get_window_type() == Wnck.WindowType.NORMAL:
new_level = self.ZOOM_ACTIVITY
else:
new_level = self._desktop_level
if self._zoom_level != new_level:
old_level = self._zoom_level
self._zoom_level = new_level
self.zoom_level_changed.send(self, old_level=old_level,
new_level=new_level)
def set_zoom_level(self, new_level, x_event_time=0):
old_level = self.zoom_level
if old_level == new_level:
return
self._zoom_level = new_level
if new_level is not self.ZOOM_ACTIVITY:
self._desktop_level = new_level
self.zoom_level_changed.send(self, old_level=old_level,
new_level=new_level)
show_desktop = new_level is not self.ZOOM_ACTIVITY
self._screen.toggle_showing_desktop(show_desktop)
if new_level is self.ZOOM_ACTIVITY:
# activate the window, in case it was iconified
# (e.g. during sugar launch, the Journal starts in this state)
window = self._active_activity.get_window()
if window:
window.activate(x_event_time or Gtk.get_current_event_time())
def _get_zoom_level(self):
return self._zoom_level
zoom_level = property(_get_zoom_level)
def _get_activities_with_window(self):
ret = []
for i in self._activities:
if i.get_window() is not None:
ret.append(i)
return ret
    def get_previous_activity(self, current=None):
        if not current:
            current = self._active_activity
        activities = self._get_activities_with_window()
        # Guard against an empty stack before looking up the current index.
        if len(activities) == 0:
            return None
        i = activities.index(current)
        if i - 1 >= 0:
            return activities[i - 1]
        else:
            return activities[len(activities) - 1]
    def get_next_activity(self, current=None):
        if not current:
            current = self._active_activity
        activities = self._get_activities_with_window()
        # Guard against an empty stack before looking up the current index.
        if len(activities) == 0:
            return None
        i = activities.index(current)
        if i + 1 < len(activities):
            return activities[i + 1]
        else:
            return activities[0]
def get_active_activity(self):
"""Returns the activity that the user is currently working in"""
return self._active_activity
def add_shared_activity(self, activity_id, color):
self._shared_activities[activity_id] = color
def remove_shared_activity(self, activity_id):
del self._shared_activities[activity_id]
def get_tabbing_activity(self):
"""Returns the activity that is currently highlighted during tabbing"""
return self._tabbing_activity
def set_tabbing_activity(self, activity):
"""Sets the activity that is currently highlighted during tabbing"""
self._tabbing_activity = activity
self.emit('tabbing-activity-changed', self._tabbing_activity)
def _set_active_activity(self, home_activity):
if self._active_activity == home_activity:
return
if home_activity:
home_activity.set_active(True)
if self._active_activity:
self._active_activity.set_active(False)
self._active_activity = home_activity
self.emit('active-activity-changed', self._active_activity)
def __iter__(self):
return iter(self._activities)
def __len__(self):
return len(self._activities)
def __getitem__(self, i):
return self._activities[i]
def index(self, obj):
return self._activities.index(obj)
def _window_opened_cb(self, screen, window):
"""Handle the callback for the 'window opened' event.
Most activities will register 2 windows during
their lifetime: the launcher window, and the 'main'
app window.
When the main window appears, we send a signal to
the launcher window to close.
Some activities (notably non-native apps) open several
windows during their lifetime, switching from one to
the next as the 'main' window. We use a stack to track
them.
"""
if window.get_window_type() == Wnck.WindowType.NORMAL or \
window.get_window_type() == Wnck.WindowType.SPLASHSCREEN:
home_activity = None
xid = window.get_xid()
activity_id = SugarExt.wm_get_activity_id(xid)
service_name = SugarExt.wm_get_bundle_id(xid)
if service_name:
registry = get_registry()
activity_info = registry.get_bundle(service_name)
else:
activity_info = None
if activity_id:
home_activity = self.get_activity_by_id(activity_id)
display = Gdk.Display.get_default()
gdk_window = GdkX11.X11Window.foreign_new_for_display(display,
xid)
gdk_window.set_decorations(0)
window.maximize()
def is_main_window(window, home_activity):
# Check if window is the 'main' app window, not the
# launcher window.
return window.get_window_type() != \
Wnck.WindowType.SPLASHSCREEN and \
home_activity.get_launch_status() == Activity.LAUNCHING
if home_activity is None and \
window.get_window_type() == Wnck.WindowType.NORMAL:
# This is a special case for the Journal
                # We check that it is not a splash screen to avoid #4767
logging.debug('first window registered for %s', activity_id)
color = self._shared_activities.get(activity_id, None)
home_activity = Activity(activity_info, activity_id,
color, window)
self._add_activity(home_activity)
else:
logging.debug('window registered for %s', activity_id)
home_activity.add_window(window, is_main_window(window,
home_activity))
if is_main_window(window, home_activity):
self.emit('launch-completed', home_activity)
startup_time = time.time() - home_activity.get_launch_time()
logging.debug('%s launched in %f seconds.',
activity_id, startup_time)
if self._active_activity is None:
self._set_active_activity(home_activity)
def _window_closed_cb(self, screen, window):
if window.get_window_type() == Wnck.WindowType.NORMAL or \
window.get_window_type() == Wnck.WindowType.SPLASHSCREEN:
xid = window.get_xid()
activity = self._get_activity_by_xid(xid)
if activity is not None:
activity.remove_window_by_xid(xid)
if activity.get_window() is None:
logging.debug('last window gone - remove activity %s',
activity)
activity.close_window()
self._remove_activity(activity)
def _get_activity_by_xid(self, xid):
for home_activity in self._activities:
if home_activity.has_xid(xid):
return home_activity
return None
def get_activity_by_id(self, activity_id):
for home_activity in self._activities:
if home_activity.get_activity_id() == activity_id:
return home_activity
return None
def _active_window_changed_cb(self, screen, previous_window=None):
window = screen.get_active_window()
if window is None:
return
if window.get_window_type() != Wnck.WindowType.DIALOG:
while window.get_transient() is not None:
window = window.get_transient()
act = self._get_activity_by_xid(window.get_xid())
if act is not None:
self._set_active_activity(act)
self._update_zoom_level(window)
def get_name_from_bundle_id(self, bundle_id):
for activity in self._get_activities_with_window():
if activity.get_bundle_id() == bundle_id:
return activity.get_activity_name()
return ''
def can_launch_activity_instance(self, bundle):
if bundle.get_single_instance():
bundle_id = bundle.get_bundle_id()
for activity in self._get_activities_with_window():
if activity.get_bundle_id() == bundle_id:
return False
return True
def can_launch_activity(self):
activities = self._get_activities_with_window()
if self._maximum_open_activities > 0 and \
len(activities) > self._maximum_open_activities:
return False
else:
return True
def _add_activity(self, home_activity):
self._activities.append(home_activity)
self.emit('activity-added', home_activity)
def _remove_activity(self, home_activity):
if home_activity == self._active_activity:
windows = Wnck.Screen.get_default().get_windows_stacked()
windows.reverse()
for window in windows:
new_activity = self._get_activity_by_xid(window.get_xid())
if new_activity is not None:
self._set_active_activity(new_activity)
break
else:
logging.error('No activities are running')
self._set_active_activity(None)
self.emit('activity-removed', home_activity)
self._activities.remove(home_activity)
def notify_launch(self, activity_id, service_name):
registry = get_registry()
activity_info = registry.get_bundle(service_name)
if not activity_info:
raise ValueError("Activity service name '%s'"
" was not found in the bundle registry."
% service_name)
color = self._shared_activities.get(activity_id, None)
home_activity = Activity(activity_info, activity_id, color)
self._add_activity(home_activity)
self._set_active_activity(home_activity)
self.emit('launch-started', home_activity)
if activity_id in self._launch_timers:
GObject.source_remove(self._launch_timers[activity_id])
del self._launch_timers[activity_id]
timer = GObject.timeout_add_seconds(90, self._check_activity_launched,
activity_id)
self._launch_timers[activity_id] = timer
def notify_launch_failed(self, activity_id):
home_activity = self.get_activity_by_id(activity_id)
if home_activity:
logging.debug('Activity %s (%s) launch failed', activity_id,
home_activity.get_type())
if self.get_launcher(activity_id) is not None:
self.emit('launch-failed', home_activity)
else:
# activity sent failure notification after closing launcher
self._remove_activity(home_activity)
else:
logging.error('Model for activity id %s does not exist.',
activity_id)
def _check_activity_launched(self, activity_id):
del self._launch_timers[activity_id]
home_activity = self.get_activity_by_id(activity_id)
if not home_activity:
logging.debug('Activity %s has been closed already.', activity_id)
return False
if self.get_launcher(activity_id) is not None:
logging.debug('Activity %s still launching, assuming it failed.',
activity_id)
self.notify_launch_failed(activity_id)
return False
def push_modal(self):
self._modal_dialogs_counter += 1
def pop_modal(self):
self._modal_dialogs_counter -= 1
def has_modal(self):
return self._modal_dialogs_counter > 0
def get_model():
global _model
if _model is None:
_model = ShellModel()
return _model
| gpl-3.0 |
cwisecarver/osf.io | addons/dropbox/migrations/0001_initial.py | 28 | 1508 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-23 20:34
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.base
import osf.utils.datetime_aware_jsonfield
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NodeSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('folder', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('oauth_grants', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=dict)),
],
options={
'abstract': False,
},
),
]
| apache-2.0 |
nuagenetworks/vspk-python | vspk/v5_0/numacfilterprofile.py | 1 | 7688 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUMACFilterProfile(NURESTObject):
""" Represents a MACFilterProfile in the VSD
Notes:
7x50 MAC Filter profile
"""
__rest_name__ = "macfilterprofile"
__resource_name__ = "macfilterprofiles"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a MACFilterProfile instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> macfilterprofile = NUMACFilterProfile(id=u'xxxx-xxx-xxx-xxx', name=u'MACFilterProfile')
>>> macfilterprofile = NUMACFilterProfile(data=my_dict)
"""
super(NUMACFilterProfile, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._description = None
self._entity_scope = None
self._assoc_entity_type = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A unique name of the MAC Profile entity.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name of the MAC Profile entity.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def description(self):
""" Get description value.
Notes:
A detailed description of the MAC Profile entity.
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A detailed description of the MAC Profile entity.
"""
self._description = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def assoc_entity_type(self):
""" Get assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
return self._assoc_entity_type
@assoc_entity_type.setter
def assoc_entity_type(self, value):
""" Set assoc_entity_type value.
Notes:
Type of parent entity
This attribute is named `assocEntityType` in VSD API.
"""
self._assoc_entity_type = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause |
teeple/pns_server | work/install/Python-2.7.4/Lib/lib2to3/fixes/fix_itertools.py | 148 | 1549 | """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (ie: import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if ('it' in results and
func.value not in (u'ifilterfalse', u'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
| gpl-2.0 |
benchisell/photostream-bc | flask/lib/python2.7/site-packages/migrate/versioning/schemadiff.py | 52 | 8741 | """
Schema differencing support.
"""
import logging
import sqlalchemy
from sqlalchemy.types import Float
log = logging.getLogger(__name__)
def getDiffOfModelAgainstDatabase(metadata, engine, excludeTables=None):
"""
Return differences of model against database.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
db_metadata = sqlalchemy.MetaData(engine, reflect=True)
# sqlite will include a dynamically generated 'sqlite_sequence' table if
# there are autoincrement sequences in the database; this should not be
# compared.
if engine.dialect.name == 'sqlite':
if 'sqlite_sequence' in db_metadata.tables:
db_metadata.remove(db_metadata.tables['sqlite_sequence'])
return SchemaDiff(metadata, db_metadata,
labelA='model',
labelB='database',
excludeTables=excludeTables)
def getDiffOfModelAgainstModel(metadataA, metadataB, excludeTables=None):
"""
Return differences of model against another model.
:return: object which will evaluate to :keyword:`True` if there \
are differences else :keyword:`False`.
"""
    return SchemaDiff(metadataA, metadataB, excludeTables=excludeTables)
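# Illustrative usage sketch (an assumption, not part of this module): a typical
# caller compares the declared model against a live database and inspects the
# result, which is truthy only when differences were found:
#
#   diff = getDiffOfModelAgainstDatabase(model_metadata, engine)
#   if diff:
#       print diff                       # human-readable summary
#       changed = diff.tables_different.get('some_table')
#
# (print is used as a statement here because this module targets Python 2.)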
class ColDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Column`
between two :class:`~sqlalchemy.schema.Table` instances, ``A``
and ``B``.
.. attribute:: col_A
The :class:`~sqlalchemy.schema.Column` object for A.
.. attribute:: col_B
The :class:`~sqlalchemy.schema.Column` object for B.
.. attribute:: type_A
The most generic type of the :class:`~sqlalchemy.schema.Column`
object in A.
.. attribute:: type_B
The most generic type of the :class:`~sqlalchemy.schema.Column`
      object in B.
"""
diff = False
def __init__(self,col_A,col_B):
self.col_A = col_A
self.col_B = col_B
self.type_A = col_A.type
self.type_B = col_B.type
self.affinity_A = self.type_A._type_affinity
self.affinity_B = self.type_B._type_affinity
if self.affinity_A is not self.affinity_B:
self.diff = True
return
if isinstance(self.type_A,Float) or isinstance(self.type_B,Float):
if not (isinstance(self.type_A,Float) and isinstance(self.type_B,Float)):
self.diff=True
return
for attr in ('precision','scale','length'):
A = getattr(self.type_A,attr,None)
B = getattr(self.type_B,attr,None)
if not (A is None or B is None) and A!=B:
self.diff=True
return
def __nonzero__(self):
return self.diff
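    # Illustrative examples (an assumption, not in the original source): with
    # the checks above, Integer vs. String columns differ by type affinity and
    # String(50) vs. String(100) differ by length, while columns whose generic
    # affinity and precision/scale/length all match compare as equal.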
class TableDiff(object):
"""
Container for differences in one :class:`~sqlalchemy.schema.Table`
between two :class:`~sqlalchemy.schema.MetaData` instances, ``A``
and ``B``.
.. attribute:: columns_missing_from_A
A sequence of column names that were found in B but weren't in
A.
.. attribute:: columns_missing_from_B
A sequence of column names that were found in A but weren't in
B.
.. attribute:: columns_different
A dictionary containing information about columns that were
found to be different.
It maps column names to a :class:`ColDiff` objects describing the
differences found.
"""
__slots__ = (
'columns_missing_from_A',
'columns_missing_from_B',
'columns_different',
)
def __nonzero__(self):
return bool(
self.columns_missing_from_A or
self.columns_missing_from_B or
self.columns_different
)
class SchemaDiff(object):
"""
Compute the difference between two :class:`~sqlalchemy.schema.MetaData`
objects.
The string representation of a :class:`SchemaDiff` will summarise
the changes found between the two
:class:`~sqlalchemy.schema.MetaData` objects.
The length of a :class:`SchemaDiff` will give the number of
changes found, enabling it to be used much like a boolean in
expressions.
:param metadataA:
First :class:`~sqlalchemy.schema.MetaData` to compare.
:param metadataB:
Second :class:`~sqlalchemy.schema.MetaData` to compare.
:param labelA:
The label to use in messages about the first
:class:`~sqlalchemy.schema.MetaData`.
:param labelB:
The label to use in messages about the second
:class:`~sqlalchemy.schema.MetaData`.
:param excludeTables:
A sequence of table names to exclude.
.. attribute:: tables_missing_from_A
A sequence of table names that were found in B but weren't in
A.
.. attribute:: tables_missing_from_B
A sequence of table names that were found in A but weren't in
B.
.. attribute:: tables_different
A dictionary containing information about tables that were found
to be different.
It maps table names to a :class:`TableDiff` objects describing the
differences found.
"""
def __init__(self,
metadataA, metadataB,
labelA='metadataA',
labelB='metadataB',
excludeTables=None):
self.metadataA, self.metadataB = metadataA, metadataB
self.labelA, self.labelB = labelA, labelB
self.label_width = max(len(labelA),len(labelB))
excludeTables = set(excludeTables or [])
A_table_names = set(metadataA.tables.keys())
B_table_names = set(metadataB.tables.keys())
self.tables_missing_from_A = sorted(
B_table_names - A_table_names - excludeTables
)
self.tables_missing_from_B = sorted(
A_table_names - B_table_names - excludeTables
)
self.tables_different = {}
for table_name in A_table_names.intersection(B_table_names):
td = TableDiff()
A_table = metadataA.tables[table_name]
B_table = metadataB.tables[table_name]
A_column_names = set(A_table.columns.keys())
B_column_names = set(B_table.columns.keys())
td.columns_missing_from_A = sorted(
B_column_names - A_column_names
)
td.columns_missing_from_B = sorted(
A_column_names - B_column_names
)
td.columns_different = {}
for col_name in A_column_names.intersection(B_column_names):
cd = ColDiff(
A_table.columns.get(col_name),
B_table.columns.get(col_name)
)
if cd:
td.columns_different[col_name]=cd
# XXX - index and constraint differences should
# be checked for here
if td:
self.tables_different[table_name]=td
def __str__(self):
''' Summarize differences. '''
out = []
        column_template = ' %%%is: %%r' % self.label_width
for names,label in (
(self.tables_missing_from_A,self.labelA),
(self.tables_missing_from_B,self.labelB),
):
if names:
out.append(
' tables missing from %s: %s' % (
label,', '.join(sorted(names))
)
)
for name,td in sorted(self.tables_different.items()):
out.append(
' table with differences: %s' % name
)
for names,label in (
(td.columns_missing_from_A,self.labelA),
(td.columns_missing_from_B,self.labelB),
):
if names:
out.append(
' %s missing these columns: %s' % (
label,', '.join(sorted(names))
)
)
for name,cd in td.columns_different.items():
out.append(' column with differences: %s' % name)
out.append(column_template % (self.labelA,cd.col_A))
out.append(column_template % (self.labelB,cd.col_B))
if out:
out.insert(0, 'Schema diffs:')
return '\n'.join(out)
else:
return 'No schema diffs'
def __len__(self):
"""
Used in bool evaluation, return of 0 means no diffs.
"""
return (
len(self.tables_missing_from_A) +
len(self.tables_missing_from_B) +
len(self.tables_different)
)
| bsd-3-clause |
bguillot/OpenUpgrade | addons/sale/sale.py | 17 | 67360 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow
class sale_order(osv.osv):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_track = {
'state': {
'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent']
},
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'date_order': fields.datetime.now(),
'state': 'draft',
'invoice_ids': [],
'date_confirm': False,
'client_order_ref': '',
'name': self.pool.get('ir.sequence').get(cr, uid, 'sale.order'),
'procurement_group_id': False,
})
return super(sale_order, self).copy(cr, uid, id, default, context=context)
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
""" Wrapper because of direct method passing as parameter for function fields """
return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
if sale.invoiced:
res[sale.id] = 100.0
continue
tot = 0.0
for invoice in sale.invoice_ids:
if invoice.state not in ('draft', 'cancel'):
tot += invoice.amount_untaxed
if tot:
res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00))
else:
res[sale.id] = 0.0
return res
def _invoice_exists(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = False
if sale.invoice_ids:
res[sale.id] = True
return res
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = True
invoice_existence = False
for invoice in sale.invoice_ids:
if invoice.state!='cancel':
invoice_existence = True
if invoice.state != 'paid':
res[sale.id] = False
break
if not invoice_existence or sale.state == 'manual':
res[sale.id] = False
return res
def _invoiced_search(self, cursor, user, obj, name, args, context=None):
if not len(args):
return []
clause = ''
sale_clause = ''
no_invoiced = False
for arg in args:
if arg[1] == '=':
if arg[2]:
clause += 'AND inv.state = \'paid\''
else:
clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id '
sale_clause = ', sale_order AS sale '
no_invoiced = True
cursor.execute('SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \
'WHERE rel.invoice_id = inv.id ' + clause)
res = cursor.fetchall()
if no_invoiced:
cursor.execute('SELECT sale.id ' \
'FROM sale_order AS sale ' \
'WHERE sale.id NOT IN ' \
'(SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'')
res.extend(cursor.fetchall())
if not res:
return [('id', '=', 0)]
return [('id', 'in', [x[0] for x in res])]
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'name': fields.char('Order Reference', size=64, required=True,
readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
'origin': fields.char('Source Document', size=64, help="Reference of the document that generated this sales order request."),
'client_order_ref': fields.char('Reference/Description', size=64),
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True, help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'date_order': fields.datetime('Date', required=True, readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'create_date': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which sales order is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which sales order is confirmed."),
'user_id': fields.many2one('res.users', 'Salesperson', states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, select=True, track_visibility='always'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order."),
'partner_shipping_id': fields.many2one('res.partner', 'Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order."),
'order_policy': fields.selection([
('manual', 'On Demand'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""This field controls how invoice and delivery operations are synchronized."""),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency", readonly=True, required=True),
'project_id': fields.many2one('account.analytic.account', 'Contract / Analytic', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order."),
'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}),
'invoice_ids': fields.many2many('account.invoice', 'sale_order_invoice_rel', 'order_id', 'invoice_id', 'Invoices', readonly=True, help="This is the list of invoices that have been generated for this sales order. The same sales order may have been invoiced in several times (by line for example)."),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Paid',
fnct_search=_invoiced_search, type='boolean', help="It indicates that an invoice has been paid."),
'invoice_exists': fields.function(_invoice_exists, string='Invoiced',
fnct_search=_invoiced_search, type='boolean', help="It indicates that sales order has at least one invoice."),
'note': fields.text('Terms and conditions'),
'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'),
'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The tax amount."),
'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The total amount."),
'payment_term': fields.many2one('account.payment.term', 'Payment Term'),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'company_id': fields.many2one('res.company', 'Company'),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'procurement_group_id': fields.many2one('procurement.group', 'Procurement group'),
}
_defaults = {
'date_order': fields.datetime.now,
'order_policy': 'manual',
'company_id': _get_default_company,
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'name': lambda obj, cr, uid, context: '/',
'partner_invoice_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['invoice'])['invoice'],
'partner_shipping_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['delivery'])['delivery'],
'note': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.sale_note,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_order = 'date_order desc, id desc'
# Form filling
def unlink(self, cr, uid, ids, context=None):
sale_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in sale_orders:
if s['state'] in ['draft', 'cancel']:
unlink_ids.append(s['id'])
else:
                raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it first!'))
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def copy_quotation(self, cr, uid, ids, context=None):
id = self.copy(cr, uid, ids[0], context=None)
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
view_id = view_ref and view_ref[1] or False,
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': id,
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def onchange_pricelist_id(self, cr, uid, ids, pricelist_id, order_lines, context=None):
context = context or {}
if not pricelist_id:
return {}
value = {
'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id
}
if not order_lines:
return {'value': value}
warning = {
'title': _('Pricelist Warning!'),
            'message' : _('If you change the pricelist of this order (and possibly the currency), prices of existing order lines will not be updated.')
}
return {'warning': warning, 'value': value}
def get_salenote(self, cr, uid, ids, partner_id, context=None):
context_lang = context.copy()
if partner_id:
partner_lang = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).lang
context_lang.update({'lang': partner_lang})
return self.pool.get('res.users').browse(cr, uid, uid, context=context_lang).company_id.sale_note
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}}
part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
addr = self.pool.get('res.partner').address_get(cr, uid, [part.id], ['delivery', 'invoice', 'contact'])
pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
payment_term = part.property_payment_term and part.property_payment_term.id or False
fiscal_position = part.property_account_position and part.property_account_position.id or False
dedicated_salesman = part.user_id and part.user_id.id or uid
val = {
'partner_invoice_id': addr['invoice'],
'partner_shipping_id': addr['delivery'],
'payment_term': payment_term,
'fiscal_position': fiscal_position,
'user_id': dedicated_salesman,
}
if pricelist:
val['pricelist_id'] = pricelist
sale_note = self.get_salenote(cr, uid, ids, part.id, context=context)
if sale_note: val.update({'note': sale_note})
return {'value': val}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'sale.order') or '/'
if vals.get('partner_id') and any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
defaults = self.onchange_partner_id(cr, uid, [], vals['partner_id'], context)['value']
vals = dict(defaults, **vals)
context.update({'mail_create_nolog': True})
new_id = super(sale_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [new_id], body=_("Quotation created"), context=context)
return new_id
def button_dummy(self, cr, uid, ids, context=None):
return True
# FIXME: deprecated method, overriders should be using _prepare_invoice() instead.
# can be removed after 6.1.
def _inv_get(self, cr, uid, order, context=None):
return {}
def _prepare_invoice(self, cr, uid, order, lines, context=None):
"""Prepare the dict of values to create the new invoice for a
sales order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: sale.order record to invoice
:param list(int) line: list of invoice line IDs that must be
attached to the invoice
:return: dict of value to create() the invoice
"""
if context is None:
context = {}
journal_ids = self.pool.get('account.journal').search(cr, uid,
[('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define sales journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
invoice_vals = {
'name': order.client_order_ref or '',
'origin': order.name,
'type': 'out_invoice',
'reference': order.client_order_ref or order.name,
'account_id': order.partner_id.property_account_receivable.id,
'partner_id': order.partner_invoice_id.id,
'journal_id': journal_ids[0],
'invoice_line': [(6, 0, lines)],
'currency_id': order.pricelist_id.currency_id.id,
'comment': order.note,
'payment_term': order.payment_term and order.payment_term.id or False,
'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
'date_invoice': context.get('date_invoice', False),
'company_id': order.company_id.id,
'user_id': order.user_id and order.user_id.id or False,
'section_id' : order.section_id.id
}
# Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1
invoice_vals.update(self._inv_get(cr, uid, order, context=context))
return invoice_vals
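    # Illustrative override sketch (an assumption, not part of this module): a
    # custom addon extending the invoice values would call super() first to
    # keep the extension chain clean, e.g.
    #
    #   class sale_order(osv.osv):
    #       _inherit = 'sale.order'
    #
    #       def _prepare_invoice(self, cr, uid, order, lines, context=None):
    #           vals = super(sale_order, self)._prepare_invoice(
    #               cr, uid, order, lines, context=context)
    #           vals['name'] = (vals.get('name') or '') + ' / custom ref'
    #           return vals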
def _make_invoice(self, cr, uid, order, lines, context=None):
inv_obj = self.pool.get('account.invoice')
obj_invoice_line = self.pool.get('account.invoice.line')
if context is None:
context = {}
invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context)
from_line_invoice_ids = []
for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context):
for invoice_line_id in invoiced_sale_line_id.invoice_lines:
if invoice_line_id.invoice_id.id not in from_line_invoice_ids:
from_line_invoice_ids.append(invoice_line_id.invoice_id.id)
for preinv in order.invoice_ids:
if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids:
for preline in preinv.invoice_line:
inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, {'invoice_id': False, 'price_unit': -preline.price_unit})
lines.append(inv_line_id)
inv = self._prepare_invoice(cr, uid, order, lines, context=context)
inv_id = inv_obj.create(cr, uid, inv, context=context)
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT))
if data.get('value', False):
inv_obj.write(cr, uid, [inv_id], data['value'], context=context)
inv_obj.button_compute(cr, uid, [inv_id])
return inv_id
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the sales order and marks it as sent, so that we can more easily see the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_quotation_sent(cr, uid, ids)
return self.pool['report'].get_action(cr, uid, ids, 'sale.report_saleorder', context=context)
def manual_invoice(self, cr, uid, ids, context=None):
""" create invoices for the given sales orders (ids), and open the form
view of one of the newly created invoices
"""
mod_obj = self.pool.get('ir.model.data')
# create invoices through the sales orders' workflow
inv_ids0 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
self.signal_manual_invoice(cr, uid, ids)
inv_ids1 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
# determine newly created invoices
new_inv_ids = list(inv_ids1 - inv_ids0)
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
        res_id = res and res[1] or False
return {
'name': _('Customer Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': new_inv_ids and new_inv_ids[0] or False,
}
def action_view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given sales order ids. It can either be in a list view or in a form view, if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of invoices to display
inv_ids = []
for so in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in so.invoice_ids]
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def test_no_product(self, cr, uid, order, context):
for line in order.order_line:
            if line.product_id and (line.product_id.type != 'service'):
return False
return True
def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice = False, context=None):
if states is None:
states = ['confirmed', 'done', 'exception']
res = False
invoices = {}
invoice_ids = []
invoice = self.pool.get('account.invoice')
obj_sale_order_line = self.pool.get('sale.order.line')
partner_currency = {}
if context is None:
context = {}
        # If a date was specified, use it as the invoice date; useful when invoices are generated this month
        # with the last day of the last month as the invoice date
if date_invoice:
context['date_invoice'] = date_invoice
for o in self.browse(cr, uid, ids, context=context):
currency_id = o.pricelist_id.currency_id.id
            if (o.partner_id.id in partner_currency) and (partner_currency[o.partner_id.id] != currency_id):
raise osv.except_osv(
_('Error!'),
_('You cannot group sales having different currencies for the same partner.'))
partner_currency[o.partner_id.id] = currency_id
lines = []
for line in o.order_line:
if line.invoiced:
continue
elif (line.state in states):
lines.append(line.id)
created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines)
if created_lines:
invoices.setdefault(o.partner_invoice_id.id or o.partner_id.id, []).append((o, created_lines))
if not invoices:
for o in self.browse(cr, uid, ids, context=context):
for i in o.invoice_ids:
if i.state == 'draft':
return i.id
for val in invoices.values():
if grouped:
res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context)
invoice_ref = ''
for o, l in val:
invoice_ref += o.name + '|'
self.write(cr, uid, [o.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res))
#remove last '|' in invoice_ref
if len(invoice_ref) >= 1:
invoice_ref = invoice_ref[:-1]
invoice.write(cr, uid, [res], {'origin': invoice_ref, 'name': invoice_ref})
else:
for order, il in val:
res = self._make_invoice(cr, uid, order, il, context=context)
invoice_ids.append(res)
self.write(cr, uid, [order.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res))
return res
def action_invoice_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'invoice_except'}, context=context)
return True
def action_invoice_end(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context=context):
for line in this.order_line:
if line.state == 'exception':
line.write({'state': 'confirmed'})
if this.state == 'invoice_except':
this.write({'state': 'progress'})
return True
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
sale_order_line_obj = self.pool.get('sale.order.line')
account_invoice_obj = self.pool.get('account.invoice')
for sale in self.browse(cr, uid, ids, context=context):
for inv in sale.invoice_ids:
if inv.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Cannot cancel this sales order!'),
_('First cancel all invoices attached to this sales order.'))
for r in self.read(cr, uid, ids, ['invoice_ids']):
account_invoice_obj.signal_invoice_cancel(cr, uid, r['invoice_ids'])
sale_order_line_obj.write(cr, uid, [l.id for l in sale.order_line],
{'state': 'cancel'})
self.write(cr, uid, ids, {'state': 'cancel'})
return True
def action_button_confirm(self, cr, uid, ids, context=None):
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.signal_order_confirm(cr, uid, ids)
# redisplay the record as a sales order
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
        view_id = view_ref and view_ref[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': ids[0],
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def action_wait(self, cr, uid, ids, context=None):
context = context or {}
for o in self.browse(cr, uid, ids):
if not o.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a sales order which has no line.'))
noprod = self.test_no_product(cr, uid, o, context)
if (o.order_policy == 'manual') or noprod:
self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
else:
self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line])
return True
def action_quotation_send(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'sale.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def action_done(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
self.pool.get('sale.order.line').write(cr, uid, [line.id for line in order.order_line], {'state': 'done'}, context=context)
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)
return {
'name': line.name,
'origin': order.name,
'date_planned': date_planned,
'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
'product_uos': (line.product_uos and line.product_uos.id) or line.product_uom.id,
'company_id': order.company_id.id,
'group_id': group_id,
'invoice_state': (order.order_policy == 'picking') and '2binvoiced' or 'none',
'sale_line_id': line.id
}
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=line.delay or 0.0)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
return {'name': order.name, 'partner_id': order.partner_shipping_id.id}
def procurement_needed(self, cr, uid, ids, context=None):
        # When only the sale module is installed there is no need to create procurements;
        # modules installed on top of it (project_mrp, sale_stock) change this behaviour.
sale_line_obj = self.pool.get('sale.order.line')
res = []
for order in self.browse(cr, uid, ids, context=context):
res.append(sale_line_obj.need_procurement(cr, uid, [line.id for line in order.order_line], context=context))
return any(res)
def action_ignore_delivery_exception(self, cr, uid, ids, context=None):
for sale_order in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, ids, {'state': 'progress' if sale_order.invoice_exists else 'manual'}, context=context)
return True
def action_ship_create(self, cr, uid, ids, context=None):
"""Create the required procurements to supply sales order lines, also connecting
the procurements to appropriate stock moves in order to bring the goods to the
sales order's requested location.
:return: True
"""
procurement_obj = self.pool.get('procurement.order')
sale_line_obj = self.pool.get('sale.order.line')
for order in self.browse(cr, uid, ids, context=context):
proc_ids = []
vals = self._prepare_procurement_group(cr, uid, order, context=context)
if not order.procurement_group_id:
group_id = self.pool.get("procurement.group").create(cr, uid, vals, context=context)
order.write({'procurement_group_id': group_id}, context=context)
for line in order.order_line:
#Try to fix exception procurement (possible when after a shipping exception the user choose to recreate)
if line.procurement_ids:
#first check them to see if they are in exception or not (one of the related moves is cancelled)
procurement_obj.check(cr, uid, [x.id for x in line.procurement_ids if x.state not in ['cancel', 'done']])
line.refresh()
#run again procurement that are in exception in order to trigger another move
proc_ids += [x.id for x in line.procurement_ids if x.state == 'exception']
elif sale_line_obj.need_procurement(cr, uid, [line.id], context=context):
if (line.state == 'done') or not line.product_id:
continue
vals = self._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
proc_id = procurement_obj.create(cr, uid, vals, context=context)
proc_ids.append(proc_id)
#Confirm procurement order such that rules will be applied on it
#note that the workflow normally ensure proc_ids isn't an empty list
procurement_obj.run(cr, uid, proc_ids, context=context)
#if shipping was in exception and the user choose to recreate the delivery order, write the new status of SO
if order.state == 'shipping_except':
val = {'state': 'progress', 'shipped': False}
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
order.write(val)
return True
# if mode == 'finished':
# returns True if all lines are done, False otherwise
# if mode == 'canceled':
# returns True if there is at least one canceled line, False otherwise
def test_state(self, cr, uid, ids, mode, *args):
assert mode in ('finished', 'canceled'), _("invalid mode for test_state")
finished = True
canceled = False
write_done_ids = []
write_cancel_ids = []
for order in self.browse(cr, uid, ids, context={}):
#TODO: Need to rethink what happens when cancelling
for line in order.order_line:
states = [x.state for x in line.procurement_ids]
cancel = states and all([x == 'cancel' for x in states])
doneorcancel = all([x in ('done', 'cancel') for x in states])
if cancel:
canceled = True
if line.state != 'exception':
write_cancel_ids.append(line.id)
if not doneorcancel:
finished = False
if doneorcancel and not cancel:
write_done_ids.append(line.id)
if write_done_ids:
self.pool.get('sale.order.line').write(cr, uid, write_done_ids, {'state': 'done'})
if write_cancel_ids:
self.pool.get('sale.order.line').write(cr, uid, write_cancel_ids, {'state': 'exception'})
if mode == 'finished':
return finished
elif mode == 'canceled':
return canceled
def procurement_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.procurement_ids]
return res
def onchange_fiscal_position(self, cr, uid, ids, fiscal_position, order_lines, context=None):
'''Update taxes of order lines for each line where a product is defined
:param list ids: not used
:param int fiscal_position: sale order fiscal position
:param list order_lines: command list for one2many write method
'''
order_line = []
fiscal_obj = self.pool.get('account.fiscal.position')
product_obj = self.pool.get('product.product')
line_obj = self.pool.get('sale.order.line')
fpos = False
if fiscal_position:
fpos = fiscal_obj.browse(cr, uid, fiscal_position, context=context)
for line in order_lines:
# create (0, 0, { fields })
# update (1, ID, { fields })
if line[0] in [0, 1]:
prod = None
if line[2].get('product_id'):
prod = product_obj.browse(cr, uid, line[2]['product_id'], context=context)
elif line[1]:
prod = line_obj.browse(cr, uid, line[1], context=context).product_id
if prod and prod.taxes_id:
line[2]['tax_id'] = [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]
order_line.append(line)
# link (4, ID)
# link all (6, 0, IDS)
elif line[0] in [4, 6]:
line_ids = line[0] == 4 and [line[1]] or line[2]
for line_id in line_ids:
prod = line_obj.browse(cr, uid, line_id, context=context).product_id
if prod and prod.taxes_id:
order_line.append([1, line_id, {'tax_id': [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]}])
else:
order_line.append([4, line_id])
else:
order_line.append(line)
return {'value': {'order_line': order_line}}
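    # For reference, the one2many command list handled above typically looks like
    # the following (ids and values are illustrative only):
    #   [(0, 0, {'product_id': 42, 'product_uom_qty': 1.0}),   # create a new line
    #    (1, 7, {'product_id': 42, 'price_unit': 10.0}),        # update line 7
    #    (4, 9),                                                # link existing line 9
    #    (6, 0, [7, 9])]                                        # replace all links with these ids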
# TODO add a field price_unit_uos
# - update it on change product and unit price
# - use it in report if there is a uos
class sale_order_line(osv.osv):
def need_procurement(self, cr, uid, ids, context=None):
        # When only the sale module is installed there is no need to create procurements;
        # modules installed on top of it (project_mrp, sale_stock) change this behaviour.
return False
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
res = {}
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, line.product_uom_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, *args):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
def _fnct_line_invoiced(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for this in self.browse(cr, uid, ids, context=context):
res[this.id] = this.invoice_lines and \
all(iline.invoice_id.state != 'cancel' for iline in this.invoice_lines)
return res
def _order_lines_from_invoice(self, cr, uid, ids, context=None):
        # direct access to the m2m table is the least convoluted way to achieve this (and is ok ACL-wise)
cr.execute("""SELECT DISTINCT sol.id FROM sale_order_invoice_rel rel JOIN
sale_order_line sol ON (sol.order_id = rel.order_id)
WHERE rel.invoice_id = ANY(%s)""", (list(ids),))
return [i[0] for i in cr.fetchall()]
_name = 'sale.order.line'
_description = 'Sales Order Line'
_columns = {
'order_id': fields.many2one('sale.order', 'Order Reference', required=True, ondelete='cascade', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'name': fields.text('Description', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."),
'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': [('readonly', False)]}, ondelete='restrict'),
'invoice_lines': fields.many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.function(_fnct_line_invoiced, string='Invoiced', type='boolean',
store={
'account.invoice': (_order_lines_from_invoice, ['state'], 10),
'sale.order.line': (lambda self,cr,uid,ids,ctx=None: ids, ['invoice_lines'], 10)
}),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price'), readonly=True, states={'draft': [('readonly', False)]}),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'sale_order_tax', 'order_line_id', 'tax_id', 'Taxes', readonly=True, states={'draft': [('readonly', False)]}),
'address_allotment_id': fields.many2one('res.partner', 'Allotment Partner',help="A partner to whom the particular product needs to be allotted."),
'product_uom_qty': fields.float('Quantity', digits_compute= dp.get_precision('Product UoS'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Quantity (UoS)' ,digits_compute= dp.get_precision('Product UoS'), readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS'),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount'), readonly=True, states={'draft': [('readonly', False)]}),
'th_weight': fields.float('Weight', readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([('cancel', 'Cancelled'),('draft', 'Draft'),('confirmed', 'Confirmed'),('exception', 'Exception'),('done', 'Done')], 'Status', required=True, readonly=True,
help='* The \'Draft\' status is set when the related sales order in draft status. \
\n* The \'Confirmed\' status is set when the related sales order is confirmed. \
\n* The \'Exception\' status is set when the related sales order is set as exception. \
\n* The \'Done\' status is set when the sales order line has been picked. \
\n* The \'Cancelled\' status is set when a user cancel the sales order related.'),
'order_partner_id': fields.related('order_id', 'partner_id', type='many2one', relation='res.partner', store=True, string='Customer'),
'salesman_id':fields.related('order_id', 'user_id', type='many2one', relation='res.users', store=True, string='Salesperson'),
'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
'procurement_ids': fields.one2many('procurement.order', 'sale_line_id', 'Procurements'),
}
_order = 'order_id desc, sequence, id'
_defaults = {
'product_uom' : _get_uom_id,
'discount': 0.0,
'product_uom_qty': 1,
'product_uos_qty': 1,
'sequence': 10,
'state': 'draft',
'price_unit': 0.0,
'delay': 0.0,
}
def _get_line_qty(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos_qty or 0.0
return line.product_uom_qty
def _get_line_uom(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos.id
return line.product_uom.id
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Prepare the dict of values to create the new invoice line for a
sales order line. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record line: sale.order.line record to invoice
:param int account_id: optional ID of a G/L account to force
(this is used for returning products including service)
:return: dict of values to create() the invoice line
"""
res = {}
if not line.invoiced:
if not account_id:
if line.product_id:
account_id = line.product_id.property_account_income.id
if not account_id:
account_id = line.product_id.categ_id.property_account_income_categ.id
if not account_id:
raise osv.except_osv(_('Error!'),
_('Please define income account for this product: "%s" (id:%d).') % \
(line.product_id.name, line.product_id.id,))
else:
prop = self.pool.get('ir.property').get(cr, uid,
'property_account_income_categ', 'product.category',
context=context)
account_id = prop and prop.id or False
uosqty = self._get_line_qty(cr, uid, line, context=context)
uos_id = self._get_line_uom(cr, uid, line, context=context)
pu = 0.0
if uosqty:
pu = round(line.price_unit * line.product_uom_qty / uosqty,
self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Price'))
fpos = line.order_id.fiscal_position or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, account_id)
if not account_id:
raise osv.except_osv(_('Error!'),
_('There is no Fiscal Position defined or Income category account defined for default properties of Product categories.'))
res = {
'name': line.name,
'sequence': line.sequence,
'origin': line.order_id.name,
'account_id': account_id,
'price_unit': pu,
'quantity': uosqty,
'discount': line.discount,
'uos_id': uos_id,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])],
'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False,
}
return res
def invoice_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
create_ids = []
sales = set()
for line in self.browse(cr, uid, ids, context=context):
vals = self._prepare_order_line_invoice_line(cr, uid, line, False, context)
if vals:
inv_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context)
self.write(cr, uid, [line.id], {'invoice_lines': [(4, inv_id)]}, context=context)
sales.add(line.order_id.id)
create_ids.append(inv_id)
# Trigger workflow events
for sale_id in sales:
workflow.trg_write(uid, 'sale.order', sale_id, cr)
return create_ids
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.invoiced:
raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sales order line that has already been invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def button_confirm(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirmed'})
def button_done(self, cr, uid, ids, context=None):
res = self.write(cr, uid, ids, {'state': 'done'})
for line in self.browse(cr, uid, ids, context=context):
workflow.trg_write(uid, 'sale.order', line.order_id.id, cr)
return res
def uos_change(self, cr, uid, ids, product_uos, product_uos_qty=0, product_id=None):
product_obj = self.pool.get('product.product')
if not product_id:
return {'value': {'product_uom': product_uos,
'product_uom_qty': product_uos_qty}, 'domain': {}}
product = product_obj.browse(cr, uid, product_id)
value = {
'product_uom': product.uom_id.id,
}
# FIXME must depend on uos/uom of the product and not only of the coeff.
try:
value.update({
'product_uom_qty': product_uos_qty / product.uos_coeff,
'th_weight': product_uos_qty / product.uos_coeff * product.weight
})
except ZeroDivisionError:
pass
return {'value': value}
def create(self, cr, uid, values, context=None):
if values.get('order_id') and values.get('product_id') and any(f not in values for f in ['name', 'price_unit', 'type', 'product_uom_qty', 'product_uom']):
order = self.pool['sale.order'].read(cr, uid, values['order_id'], ['pricelist_id', 'partner_id', 'date_order', 'fiscal_position'], context=context)
defaults = self.product_id_change(cr, uid, [], order['pricelist_id'][0], values['product_id'],
qty=float(values.get('product_uom_qty', False)),
uom=values.get('product_uom', False),
qty_uos=float(values.get('product_uos_qty', False)),
uos=values.get('product_uos', False),
name=values.get('name', False),
partner_id=order['partner_id'][0],
date_order=order['date_order'],
fiscal_position=order['fiscal_position'][0] if order['fiscal_position'] else False,
flag=False, # Force name update
context=context
)['value']
values = dict(defaults, **values)
return super(sale_order_line, self).create(cr, uid, values, context=context)
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state': 'draft', 'invoice_lines': [], 'procurement_ids': []})
return super(sale_order_line, self).copy_data(cr, uid, id, default, context=context)
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
context = context or {}
lang = lang or context.get('lang', False)
if not partner_id:
raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\n select a customer in the sales form.'))
warning = False
product_uom_obj = self.pool.get('product.uom')
partner_obj = self.pool.get('res.partner')
product_obj = self.pool.get('product.product')
context = {'lang': lang, 'partner_id': partner_id}
partner = partner_obj.browse(cr, uid, partner_id)
lang = partner.lang
context_partner = {'lang': lang, 'partner_id': partner_id}
if not product:
return {'value': {'th_weight': 0,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
if not date_order:
date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
result = {}
warning_msgs = ''
product_obj = product_obj.browse(cr, uid, product, context=context_partner)
uom2 = False
if uom:
uom2 = product_uom_obj.browse(cr, uid, uom)
if product_obj.uom_id.category_id.id != uom2.category_id.id:
uom = False
if uos:
if product_obj.uos_id:
uos2 = product_uom_obj.browse(cr, uid, uos)
if product_obj.uos_id.category_id.id != uos2.category_id.id:
uos = False
else:
uos = False
fpos = False
if not fiscal_position:
fpos = partner.property_account_position or False
else:
fpos = self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position)
if update_tax: #The quantity only have changed
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, product_obj.taxes_id)
if not flag:
result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1]
if product_obj.description_sale:
result['name'] += '\n'+product_obj.description_sale
domain = {}
if (not uom) and (not uos):
result['product_uom'] = product_obj.uom_id.id
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
uos_category_id = product_obj.uos_id.category_id.id
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
uos_category_id = False
result['th_weight'] = qty * product_obj.weight
domain = {'product_uom':
[('category_id', '=', product_obj.uom_id.category_id.id)],
'product_uos':
[('category_id', '=', uos_category_id)]}
elif uos and not uom: # only happens if uom is False
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id
result['product_uom_qty'] = qty_uos / product_obj.uos_coeff
result['th_weight'] = result['product_uom_qty'] * product_obj.weight
elif uom: # whether uos is set or not
default_uom = product_obj.uom_id and product_obj.uom_id.id
q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom)
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
            result['th_weight'] = q * product_obj.weight  # theoretical weight based on the quantity converted to the default UoM
if not uom2:
uom2 = product_obj.uom_id
# get unit price
if not pricelist:
warn_msg = _('You have to select a pricelist or a customer in the sales form !\n'
'Please set one before choosing a product.')
warning_msgs += _("No Pricelist ! : ") + warn_msg +"\n\n"
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, qty or 1.0, partner_id, {
'uom': uom or result.get('product_uom'),
'date': date_order,
})[pricelist]
if price is False:
warn_msg = _("Cannot find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
warning_msgs += _("No valid pricelist line found ! :") + warn_msg +"\n\n"
else:
result.update({'price_unit': price})
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
return {'value': result, 'domain': domain, 'warning': warning}
def product_uom_change(self, cursor, user, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, context=None):
context = context or {}
lang = lang or ('lang' in context and context['lang'])
if not uom:
return {'value': {'price_unit': 0.0, 'product_uom' : uom or False}}
return self.product_id_change(cursor, user, ids, pricelist, product,
qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name,
partner_id=partner_id, lang=lang, update_tax=update_tax,
date_order=date_order, context=context)
    def unlink(self, cr, uid, ids, context=None):
        """Allow deleting sales order lines only in the draft and cancel states."""
        if context is None:
            context = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a sales order line which is in state \'%s\'.') %(rec.state,))
return super(sale_order_line, self).unlink(cr, uid, ids, context=context)
class res_company(osv.Model):
_inherit = "res.company"
_columns = {
'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'sale.order' and context.get('default_res_id') and context.get('mark_so_as_sent'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('sale.order').signal_quotation_sent(cr, uid, [context['default_res_id']])
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_defaults = {
'section_id': lambda self, cr, uid, c=None: self._get_default_section_id(cr, uid, context=c)
}
def confirm_paid(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order')
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
so_ids = sale_order_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
for so_id in so_ids:
sale_order_obj.message_post(cr, uid, so_id, body=_("Invoice paid"), context=context)
return res
def unlink(self, cr, uid, ids, context=None):
""" Overwrite unlink method of account invoice to send a trigger to the sale workflow upon invoice deletion """
invoice_ids = self.search(cr, uid, [('id', 'in', ids), ('state', 'in', ['draft', 'cancel'])], context=context)
#if we can't cancel all invoices, do nothing
if len(invoice_ids) == len(ids):
#Cancel invoice(s) first before deleting them so that if any sale order is associated with them
#it will trigger the workflow to put the sale order in an 'invoice exception' state
for id in ids:
workflow.trg_validate(uid, 'account.invoice', id, 'invoice_cancel', cr)
return super(account_invoice, self).unlink(cr, uid, ids, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'sale_line_id': fields.many2one('sale.order.line', string='Sale Order Line'),
}
class product_product(osv.Model):
_inherit = 'product.product'
def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
SaleOrderLine = self.pool['sale.order.line']
return {
product_id: SaleOrderLine.search_count(cr,uid, [('product_id', '=', product_id)], context=context)
for product_id in ids
}
_columns = {
'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mmalorni/server-tools | __unported__/fetchmail_attach_from_folder/match_algorithm/email_domain.py | 6 | 1985 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from email_exact import email_exact
class email_domain(email_exact):
'''Search objects by domain name of email address.
Beware of match_first here, this is most likely to get it wrong (gmail)'''
name = 'Domain of email address'
def search_matches(self, cr, uid, conf, mail_message, mail_message_org):
ids = super(email_domain, self).search_matches(
cr, uid, conf, mail_message, mail_message_org)
if not ids:
domains = []
for addr in self._get_mailaddresses(conf, mail_message):
domains.append(addr.split('@')[-1])
ids = conf.pool.get(conf.model_id.model).search(
cr, uid,
self._get_mailaddress_search_domain(
conf, mail_message,
operator='like',
values=['%@'+domain for domain in set(domains)]),
order=conf.model_order)
return ids
| agpl-3.0 |
e9wifi-dev/android_kernel_lge_e9wifi-test | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
loco-odoo/localizacion_co | openerp/addons/crm_partner_assign/report/crm_partner_report.py | 264 | 3374 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
class crm_partner_report_assign(osv.osv):
""" CRM Lead Report """
_name = "crm.partner.report.assign"
_auto = False
_description = "CRM Partner Report"
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', required=False, readonly=True),
'grade_id':fields.many2one('res.partner.grade', 'Grade', readonly=True),
'activation' : fields.many2one('res.partner.activation', 'Activation', select=1),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'date_review' : fields.date('Latest Partner Review'),
'date_partnership' : fields.date('Partnership Date'),
'country_id':fields.many2one('res.country', 'Country', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
'opp': fields.integer('# of Opportunity', readonly=True), # TDE FIXME master: rename into nbr_opportunities
'turnover': fields.float('Turnover', readonly=True),
'period_id': fields.many2one('account.period', 'Invoice Period', readonly=True),
}
def init(self, cr):
"""
CRM Lead Report
@param cr: the current row, from the database cursor
"""
tools.drop_view_if_exists(cr, 'crm_partner_report_assign')
cr.execute("""
CREATE OR REPLACE VIEW crm_partner_report_assign AS (
SELECT
coalesce(i.id, p.id - 1000000000) as id,
p.id as partner_id,
(SELECT country_id FROM res_partner a WHERE a.parent_id=p.id AND country_id is not null limit 1) as country_id,
p.grade_id,
p.activation,
p.date_review,
p.date_partnership,
p.user_id,
p.section_id,
(SELECT count(id) FROM crm_lead WHERE partner_assigned_id=p.id) AS opp,
i.price_total as turnover,
i.period_id
FROM
res_partner p
left join account_invoice_report i
on (i.partner_id=p.id and i.type in ('out_invoice','out_refund') and i.state in ('open','paid'))
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fuselock/odoo | addons/email_template/wizard/email_template_preview.py | 377 | 3851 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class email_template_preview(osv.osv_memory):
_inherit = "email.template"
_name = "email_template.preview"
_description = "Email Template Preview"
def _get_records(self, cr, uid, context=None):
"""
Return Records of particular Email Template's Model
"""
if context is None:
context = {}
template_id = context.get('template_id', False)
if not template_id:
return []
email_template = self.pool.get('email.template')
template = email_template.browse(cr, uid, int(template_id), context=context)
template_object = template.model_id
model = self.pool[template_object.model]
record_ids = model.search(cr, uid, [], 0, 10, 'id', context=context)
default_id = context.get('default_res_id')
if default_id and default_id not in record_ids:
record_ids.insert(0, default_id)
return model.name_get(cr, uid, record_ids, context)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
result = super(email_template_preview, self).default_get(cr, uid, fields, context=context)
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
if 'res_id' in fields and not result.get('res_id'):
records = self._get_records(cr, uid, context=context)
result['res_id'] = records and records[0][0] or False # select first record as a Default
if template_id and 'model_id' in fields and not result.get('model_id'):
result['model_id'] = email_template.read(cr, uid, int(template_id), ['model_id'], context).get('model_id', False)
return result
_columns = {
'res_id': fields.selection(_get_records, 'Sample Document'),
'partner_ids': fields.many2many('res.partner', string='Recipients'),
}
def on_change_res_id(self, cr, uid, ids, res_id, context=None):
if context is None:
context = {'value': {}}
if not res_id or not context.get('template_id'):
return {'value': {}}
email_template = self.pool.get('email.template')
template_id = context.get('template_id')
template = email_template.browse(cr, uid, template_id, context=context)
# generate and get template values
mail_values = email_template.generate_email(cr, uid, template_id, res_id, context=context)
vals = dict((field, mail_values.get(field, False)) for field in ('email_from', 'email_to', 'email_cc', 'reply_to', 'subject', 'body_html', 'partner_to', 'partner_ids', 'attachment_ids'))
vals['name'] = template.name
return {'value': vals}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fronzbot/blinkpy | blinkpy/blinkpy.py | 1 | 13098 | # -*- coding: utf-8 -*-
"""
blinkpy is an unofficial api for the Blink security camera system.
repo url: https://github.com/fronzbot/blinkpy
Original protocol hacking by MattTW :
https://github.com/MattTW/BlinkMonitorProtocol
Published under the MIT license - See LICENSE file for more details.
"Blink Wire-Free HS Home Monitoring & Alert Systems" is a trademark
owned by Immedia Inc., see www.blinkforhome.com for more information.
blinkpy is in no way affiliated with Blink, nor Immedia Inc.
"""
import os.path
import time
import logging
from shutil import copyfileobj
from requests.structures import CaseInsensitiveDict
from dateutil.parser import parse
from slugify import slugify
from blinkpy import api
from blinkpy.sync_module import BlinkSyncModule, BlinkOwl
from blinkpy.helpers import util
from blinkpy.helpers.constants import (
DEFAULT_MOTION_INTERVAL,
DEFAULT_REFRESH,
MIN_THROTTLE_TIME,
TIMEOUT_MEDIA,
)
from blinkpy.helpers.constants import __version__
from blinkpy.auth import Auth, TokenRefreshFailed, LoginError
_LOGGER = logging.getLogger(__name__)
class Blink:
"""Class to initialize communication."""
def __init__(
self,
refresh_rate=DEFAULT_REFRESH,
motion_interval=DEFAULT_MOTION_INTERVAL,
no_owls=False,
):
"""
Initialize Blink system.
:param refresh_rate: Refresh rate of blink information.
Defaults to 15 (seconds)
:param motion_interval: How far back to register motion in minutes.
Defaults to last refresh time.
Useful for preventing motion_detected property
from de-asserting too quickly.
        :param no_owls: Disable searching for owl entries (Blink Mini cameras
                        are the only known entity). Prevents an unnecessary
                        API call if you don't have these in your network.
"""
self.auth = Auth()
self.account_id = None
self.client_id = None
self.network_ids = []
self.urls = None
self.sync = CaseInsensitiveDict({})
self.last_refresh = None
self.refresh_rate = refresh_rate
self.networks = []
self.cameras = CaseInsensitiveDict({})
self.video_list = CaseInsensitiveDict({})
self.motion_interval = motion_interval
self.version = __version__
self.available = False
self.key_required = False
self.homescreen = {}
self.no_owls = no_owls
@util.Throttle(seconds=MIN_THROTTLE_TIME)
def refresh(self, force=False, force_cache=False):
"""
Perform a system refresh.
:param force: Used to override throttle, resets refresh
:param force_cache: Used to force update without overriding throttle
"""
if self.check_if_ok_to_update() or force or force_cache:
if not self.available:
self.setup_post_verify()
self.get_homescreen()
for sync_name, sync_module in self.sync.items():
_LOGGER.debug("Attempting refresh of sync %s", sync_name)
sync_module.refresh(force_cache=(force or force_cache))
if not force_cache:
# Prevents rapid clearing of motion detect property
self.last_refresh = int(time.time())
return True
return False
def start(self):
"""Perform full system setup."""
try:
self.auth.startup()
self.setup_login_ids()
self.setup_urls()
self.get_homescreen()
except (LoginError, TokenRefreshFailed, BlinkSetupError):
_LOGGER.error("Cannot setup Blink platform.")
self.available = False
return False
self.key_required = self.auth.check_key_required()
if self.key_required:
if self.auth.no_prompt:
return True
self.setup_prompt_2fa()
return self.setup_post_verify()
def setup_prompt_2fa(self):
"""Prompt for 2FA."""
email = self.auth.data["username"]
pin = input(f"Enter code sent to {email}: ")
result = self.auth.send_auth_key(self, pin)
self.key_required = not result
def setup_post_verify(self):
"""Initialize blink system after verification."""
try:
self.setup_networks()
networks = self.setup_network_ids()
cameras = self.setup_camera_list()
except BlinkSetupError:
self.available = False
return False
for name, network_id in networks.items():
sync_cameras = cameras.get(network_id, {})
self.setup_sync_module(name, network_id, sync_cameras)
self.cameras = self.merge_cameras()
self.available = True
self.key_required = False
return True
def setup_sync_module(self, name, network_id, cameras):
"""Initialize a sync module."""
self.sync[name] = BlinkSyncModule(self, name, network_id, cameras)
self.sync[name].start()
def get_homescreen(self):
"""Get homecreen information."""
if self.no_owls:
_LOGGER.debug("Skipping owl extraction.")
self.homescreen = {}
return
self.homescreen = api.request_homescreen(self)
def setup_owls(self):
"""Check for mini cameras."""
network_list = []
camera_list = []
try:
for owl in self.homescreen["owls"]:
name = owl["name"]
network_id = str(owl["network_id"])
if network_id in self.network_ids:
camera_list.append(
{network_id: {"name": name, "id": network_id, "type": "mini"}}
)
continue
if owl["onboarded"]:
network_list.append(str(network_id))
self.sync[name] = BlinkOwl(self, name, network_id, owl)
self.sync[name].start()
except KeyError:
# No sync-less devices found
pass
self.network_ids.extend(network_list)
return camera_list
def setup_camera_list(self):
"""Create camera list for onboarded networks."""
all_cameras = {}
response = api.request_camera_usage(self)
try:
for network in response["networks"]:
camera_network = str(network["network_id"])
if camera_network not in all_cameras:
all_cameras[camera_network] = []
for camera in network["cameras"]:
all_cameras[camera_network].append(
{"name": camera["name"], "id": camera["id"]}
)
mini_cameras = self.setup_owls()
for camera in mini_cameras:
for network, camera_info in camera.items():
all_cameras[network].append(camera_info)
return all_cameras
except (KeyError, TypeError):
_LOGGER.error("Unable to retrieve cameras from response %s", response)
raise BlinkSetupError
def setup_login_ids(self):
"""Retrieve login id numbers from login response."""
self.client_id = self.auth.client_id
self.account_id = self.auth.account_id
def setup_urls(self):
"""Create urls for api."""
try:
self.urls = util.BlinkURLHandler(self.auth.region_id)
except TypeError:
_LOGGER.error(
"Unable to extract region is from response %s", self.auth.login_response
)
raise BlinkSetupError
def setup_networks(self):
"""Get network information."""
response = api.request_networks(self)
try:
self.networks = response["summary"]
except (KeyError, TypeError):
raise BlinkSetupError
def setup_network_ids(self):
"""Create the network ids for onboarded networks."""
all_networks = []
network_dict = {}
try:
for network, status in self.networks.items():
if status["onboarded"]:
all_networks.append(f"{network}")
network_dict[status["name"]] = network
except AttributeError:
_LOGGER.error(
"Unable to retrieve network information from %s", self.networks
)
raise BlinkSetupError
self.network_ids = all_networks
return network_dict
def check_if_ok_to_update(self):
"""Check if it is ok to perform an http request."""
current_time = int(time.time())
last_refresh = self.last_refresh
if last_refresh is None:
last_refresh = 0
if current_time >= (last_refresh + self.refresh_rate):
return True
return False
def merge_cameras(self):
"""Merge all sync camera dicts into one."""
combined = CaseInsensitiveDict({})
for sync in self.sync:
combined = util.merge_dicts(combined, self.sync[sync].cameras)
return combined
def save(self, file_name):
"""Save login data to file."""
util.json_save(self.auth.login_attributes, file_name)
def download_videos(
self, path, since=None, camera="all", stop=10, delay=1, debug=False
):
"""
Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10).
:param delay: Number of seconds to wait in between subsequent video downloads.
:param debug: Set to TRUE to prevent downloading of items.
Instead of downloading, entries will be printed to log.
"""
if since is None:
since_epochs = self.last_refresh
else:
parsed_datetime = parse(since, fuzzy=True)
since_epochs = parsed_datetime.timestamp()
formatted_date = util.get_time(time_to_convert=since_epochs)
_LOGGER.info("Retrieving videos since %s", formatted_date)
if not isinstance(camera, list):
camera = [camera]
for page in range(1, stop):
response = api.request_videos(self, time=since_epochs, page=page)
_LOGGER.debug("Processing page %s", page)
try:
result = response["media"]
if not result:
raise KeyError
except (KeyError, TypeError):
_LOGGER.info("No videos found on page %s. Exiting.", page)
break
self._parse_downloaded_items(result, camera, path, delay, debug)
def _parse_downloaded_items(self, result, camera, path, delay, debug):
"""Parse downloaded videos."""
for item in result:
try:
created_at = item["created_at"]
camera_name = item["device_name"]
is_deleted = item["deleted"]
address = item["media"]
except KeyError:
_LOGGER.info("Missing clip information, skipping...")
continue
if camera_name not in camera and "all" not in camera:
_LOGGER.debug("Skipping videos for %s.", camera_name)
continue
if is_deleted:
_LOGGER.debug("%s: %s is marked as deleted.", camera_name, address)
continue
clip_address = f"{self.urls.base_url}{address}"
filename = f"{camera_name}-{created_at}"
filename = f"{slugify(filename)}.mp4"
filename = os.path.join(path, filename)
if not debug:
if os.path.isfile(filename):
_LOGGER.info("%s already exists, skipping...", filename)
continue
response = api.http_get(
self,
url=clip_address,
stream=True,
json=False,
timeout=TIMEOUT_MEDIA,
)
with open(filename, "wb") as vidfile:
copyfileobj(response.raw, vidfile)
_LOGGER.info("Downloaded video to %s", filename)
else:
print(
(
f"Camera: {camera_name}, Timestamp: {created_at}, "
"Address: {address}, Filename: {filename}"
)
)
if delay > 0:
time.sleep(delay)
class BlinkSetupError(Exception):
"""Class to handle setup errors."""
| mit |
butchman0922/gourmet | gourmet/plugins/import_export/plaintext_plugin/plaintext_importer_plugin.py | 6 | 2087 | from gourmet.plugin import ImporterPlugin
from gourmet.importers.importer import Tester
from gourmet.threadManager import get_thread_manager
from gourmet.importers.interactive_importer import InteractiveImporter
from gourmet import check_encodings
import os.path
import fnmatch
from gettext import gettext as _
MAX_PLAINTEXT_LENGTH = 100000
class PlainTextImporter (InteractiveImporter):
name = 'Plain Text Importer'
def __init__ (self, filename):
self.filename = filename
InteractiveImporter.__init__(self)
def do_run (self):
if os.path.getsize(self.filename) > MAX_PLAINTEXT_LENGTH*16:
            import gtk
            import gourmet.gtk_extras.dialog_extras as de
de.show_message(title=_('Big File'),
label=_('File %s is too big to import'%self.filename),
sublabel=_('Your file exceeds the maximum length of %s characters. You probably didn\'t mean to import it anyway. If you really do want to import this file, use a text editor to split it into smaller files and try importing again.')%MAX_PLAINTEXT_LENGTH,
message_type=gtk.MESSAGE_ERROR)
return
ifi = file(self.filename,'r')
data = '\n'.join(check_encodings.get_file(ifi))
ifi.close()
self.set_text(data)
return InteractiveImporter.do_run(self)
class PlainTextImporterPlugin (ImporterPlugin):
name = _('Plain Text file')
patterns = ['*.txt','[^.]*','*']
mimetypes = ['text/plain']
antipatterns = ['*.html','*.htm','*.xml','*.doc','*.rtf']
def test_file (self, filename):
'''Given a filename, test whether the file is of this type.'''
if filename.endswith('.txt'):
return 1
elif not True in [fnmatch.fnmatch(filename,p) for p in self.antipatterns]:
return -1 # we are a fallback option
def get_importer (self, filename):
return PlainTextImporter(filename=filename)
| gpl-2.0 |
GbalsaC/bitnamiP | lms/djangoapps/notes/models.py | 100 | 3156 | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.utils.html import strip_tags
import json
from xmodule_django.models import CourseKeyField
class Note(models.Model):
user = models.ForeignKey(User, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
uri = models.CharField(max_length=255, db_index=True)
text = models.TextField(default="")
quote = models.TextField(default="")
range_start = models.CharField(max_length=2048) # xpath string
range_start_offset = models.IntegerField()
range_end = models.CharField(max_length=2048) # xpath string
range_end_offset = models.IntegerField()
tags = models.TextField(default="") # comma-separated string
created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
updated = models.DateTimeField(auto_now=True, db_index=True)
def clean(self, json_body):
"""
Cleans the note object or raises a ValidationError.
"""
if json_body is None:
raise ValidationError('Note must have a body.')
body = json.loads(json_body)
if not isinstance(body, dict):
raise ValidationError('Note body must be a dictionary.')
# NOTE: all three of these fields should be considered user input
# and may be output back to the user, so we need to sanitize them.
# These fields should only contain _plain text_.
self.uri = strip_tags(body.get('uri', ''))
self.text = strip_tags(body.get('text', ''))
self.quote = strip_tags(body.get('quote', ''))
ranges = body.get('ranges')
if ranges is None or len(ranges) != 1:
raise ValidationError('Note must contain exactly one range.')
self.range_start = ranges[0]['start']
self.range_start_offset = ranges[0]['startOffset']
self.range_end = ranges[0]['end']
self.range_end_offset = ranges[0]['endOffset']
self.tags = ""
tags = [strip_tags(tag) for tag in body.get('tags', [])]
if len(tags) > 0:
self.tags = ",".join(tags)
def get_absolute_url(self):
"""
Returns the absolute url for the note object.
"""
# pylint: disable=no-member
kwargs = {'course_id': self.course_id.to_deprecated_string(), 'note_id': str(self.pk)}
return reverse('notes_api_note', kwargs=kwargs)
def as_dict(self):
"""
Returns the note object as a dictionary.
"""
return {
'id': self.pk,
'user_id': self.user.pk,
'uri': self.uri,
'text': self.text,
'quote': self.quote,
'ranges': [{
'start': self.range_start,
'startOffset': self.range_start_offset,
'end': self.range_end,
'endOffset': self.range_end_offset
}],
'tags': self.tags.split(","),
'created': str(self.created),
'updated': str(self.updated)
}
| agpl-3.0 |
IllusionRom-deprecated/android_platform_external_chromium_org | tools/telemetry/telemetry/page/actions/click_element_unittest.py | 23 | 3183 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.page.actions import click_element
from telemetry.page.actions import wait
from telemetry.unittest import tab_test_case
class ClickElementActionTest(tab_test_case.TabTestCase):
def testClickWithSelectorWaitForNavigation(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'selector': 'a[id="clickme"]'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithSingleQuoteSelectorWaitForNavigation(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'selector': 'a[id=\'clickme\']'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithTextWaitForRefChange(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'text': 'Click me'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testClickWithXPathWaitForRefChange(self):
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
data = {'xpath': '//a[@id="clickme"]'}
i = click_element.ClickElementAction(data)
data = {'condition': 'href_change'}
j = wait.WaitAction(data)
j.RunAction(None, self._tab, i)
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
| bsd-3-clause |
vlegoff/tsunami | src/secondaires/crafting/commandes/__init__.py | 1 | 1676 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant les commandes du module crafting."""
from secondaires.crafting.commandes import guilde
| bsd-3-clause |
airqj/ardupilot-raspilot | Tools/autotest/arducopter.py | 39 | 44394 | # fly ArduCopter in SITL
# Flight mode switch positions are set-up in arducopter.param to be
# switch 1 = Circle
# switch 2 = Land
# switch 3 = RTL
# switch 4 = Auto
# switch 5 = Loiter
# switch 6 = Stabilize
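# In the tests below a flight mode is selected by sending the matching switch
# position over RC and then waiting for the mode change, e.g.:
#   mavproxy.send('switch 6\n') # stabilize mode
#   wait_mode(mav, 'STABILIZE')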
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil, mavwp
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
FRAME='+'
TARGET='sitl'
HOME=mavutil.location(-35.362938,149.165085,584,270)
AVCHOME=mavutil.location(40.072842,-105.230575,1586,0)
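# HOME/AVCHOME hold lat, lng, alt and heading; they are formatted into the
# SITL home string ("%f,%f,%u,%u") when the simulator is started below.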
homeloc = None
num_wp = 0
speedup_default = 5
def hover(mavproxy, mav, hover_throttle=1450):
mavproxy.send('rc 3 %u\n' % hover_throttle)
return True
def arm_motors(mavproxy, mav):
'''arm motors'''
print("Arming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 2000\n')
mavproxy.expect('APM: ARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_armed_wait()
print("MOTORS ARMED OK")
return True
def disarm_motors(mavproxy, mav):
'''disarm motors'''
print("Disarming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 1000\n')
mavproxy.expect('APM: DISARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
return True
def takeoff(mavproxy, mav, alt_min = 30, takeoff_throttle=1700):
'''takeoff get to 30m altitude'''
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 %u\n' % takeoff_throttle)
m = mav.recv_match(type='VFR_HUD', blocking=True)
if (m.alt < alt_min):
wait_altitude(mav, alt_min, (alt_min + 5))
hover(mavproxy, mav)
print("TAKEOFF COMPLETE")
return True
# loiter - fly south west, then hold loiter within 5m position and altitude
def loiter(mavproxy, mav, holdtime=10, maxaltchange=5, maxdistchange=5):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first aim south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 170):
return False
mavproxy.send('rc 4 1500\n')
#fly south east 50m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 50):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow moving
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
if success:
print("Loiter OK for %u seconds" % holdtime)
else:
print("Loiter FAILED")
return success
def change_alt(mavproxy, mav, alt_min, climb_throttle=1920, descend_throttle=1080):
'''change altitude'''
m = mav.recv_match(type='VFR_HUD', blocking=True)
if(m.alt < alt_min):
print("Rise to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % climb_throttle)
wait_altitude(mav, alt_min, (alt_min + 5))
else:
print("Lower to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % descend_throttle)
wait_altitude(mav, (alt_min -5), alt_min)
hover(mavproxy, mav)
return True
# fly a square in stabilize mode
def fly_square(mavproxy, mav, side=50, timeout=300):
'''fly a square, flying N then E'''
tstart = get_sim_time(mav)
success = True
# ensure all sticks in the middle
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 2 1500\n')
mavproxy.send('rc 3 1500\n')
mavproxy.send('rc 4 1500\n')
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim north
print("turn right towards north")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 10):
print("Failed to reach heading")
success = False
mavproxy.send('rc 4 1500\n')
mav.recv_match(condition='RC_CHANNELS_RAW.chan4_raw==1500', blocking=True)
# save bottom left corner of box as waypoint
print("Save WP 1 & 2")
save_wp(mavproxy, mav)
# switch back to stabilize mode
mavproxy.send('rc 3 1430\n')
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
# pitch forward to fly north
print("Going north %u meters" % side)
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save top left corner of square as waypoint
print("Save WP 3")
save_wp(mavproxy, mav)
# roll right to fly east
print("Going east %u meters" % side)
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save top right corner of square as waypoint
print("Save WP 4")
save_wp(mavproxy, mav)
# pitch back to fly south
print("Going south %u meters" % side)
mavproxy.send('rc 2 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save bottom right corner of square as waypoint
print("Save WP 5")
save_wp(mavproxy, mav)
# roll left to fly west
print("Going west %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save bottom left corner of square (should be near home) as waypoint
print("Save WP 6")
save_wp(mavproxy, mav)
# descend to 10m
print("Descend to 10m in Loiter")
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
mavproxy.send('rc 3 1300\n')
time_left = timeout - (get_sim_time(mav) - tstart)
print("timeleft = %u" % time_left)
if time_left < 20:
time_left = 20
if not wait_altitude(mav, -10, 10, time_left):
print("Failed to reach alt of 10m")
success = False
save_wp(mavproxy, mav)
return success
def fly_RTL(mavproxy, mav, side=60, timeout=250):
'''Return, land'''
print("# Enter RTL")
mavproxy.send('switch 3\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
if(m.alt <= 1 and home_distance < 10):
return True
return False
def fly_throttle_failsafe(mavproxy, mav, side=60, timeout=180):
'''Fly east, Failsafe, return, land'''
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 135):
return False
mavproxy.send('rc 4 1500\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
hover(mavproxy, mav)
failed = False
# fly east 60 meters
print("# Going forward %u meters" % side)
mavproxy.send('rc 2 1350\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
# pull throttle low
print("# Enter Failsafe")
mavproxy.send('rc 3 900\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# check if we've reached home
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached failsafe home OK")
return True
print("Failed to land on failsafe RTL - timed out after %u seconds" % timeout)
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize mode
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
return False
def fly_battery_failsafe(mavproxy, mav, timeout=30):
# assume failure
success = False
# switch to loiter mode so that we hold position
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
mavproxy.send("rc 3 1500\n")
# enable battery failsafe
mavproxy.send("param set FS_BATT_ENABLE 1\n")
# trigger low voltage
mavproxy.send('param set SIM_BATT_VOLTAGE 10\n')
# wait for LAND mode
new_mode = wait_mode(mav, 'LAND')
if new_mode == 'LAND':
success = True
# disable battery failsafe
mavproxy.send('param set FS_BATT_ENABLE 0\n')
# return status
if success:
print("Successfully entered LAND mode after battery failsafe")
else:
print("Failed to enter LAND mode after battery failsafe")
return success
# fly_stability_patch - fly south, then hold loiter within 5m position and altitude and reduce 1 motor to 60% efficiency
def fly_stability_patch(mavproxy, mav, holdtime=30, maxaltchange=5, maxdistchange=10):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first south
print("turn south")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 180):
return False
mavproxy.send('rc 4 1500\n')
#fly west 80m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 80):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow moving
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
# cut motor 1 to 55% efficiency
print("Cutting motor 1 to 55% efficiency")
mavproxy.send('param set SIM_ENGINE_MUL 0.55\n')
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
# restore motor 1 to 100% efficiency
mavproxy.send('param set SIM_ENGINE_MUL 1.0\n')
if success:
print("Stability patch and Loiter OK for %u seconds" % holdtime)
else:
print("Stability Patch FAILED")
return success
# fly_fence_test - fly east until you hit the horizontal circular fence
def fly_fence_test(mavproxy, mav, timeout=180):
'''fly east until the horizontal fence triggers, then wait for the copter to return home'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# enable fence
mavproxy.send('param set FENCE_ENABLE 1\n')
# first east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 160):
return False
mavproxy.send('rc 4 1500\n')
# fly forward (east) at least 20m
pitching_forward = True
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 20):
return False
# start timer
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# recenter pitch sticks once we reach home so we don't fly off again
if pitching_forward and home_distance < 10 :
pitching_forward = False
mavproxy.send('rc 2 1500\n')
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached home OK")
return True
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Fence test failed to reach home - timed out after %u seconds" % timeout)
return False
def show_gps_and_sim_positions(mavproxy, on_off):
if on_off == True:
# turn on simulator display of gps and actual position
mavproxy.send('map set showgpspos 1\n')
mavproxy.send('map set showsimpos 1\n')
else:
# turn off simulator display of gps and actual position
mavproxy.send('map set showgpspos 0\n')
mavproxy.send('map set showsimpos 0\n')
# fly_gps_glitch_loiter_test - fly south east in loiter and test reaction to gps glitch
def fly_gps_glitch_loiter_test(mavproxy, mav, timeout=30, max_distance=20):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# turn south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 150):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 4 1500\n')
# fly forward (south east) at least 60m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow down
if not wait_groundspeed(mav, 0, 1):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
success = True
# initialise current glitch
glitch_current = 0;
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while tnow < tstart + timeout:
tnow = get_sim_time(mav)
desired_glitch_num = int((tnow - tstart) * 2.2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# turn off glitching if we've reached the end of the glitch list
if glitch_current >= glitch_num:
glitch_current = -1
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
else:
print("Applying glitch %u" % glitch_current)
#move onto the next glitch
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# start displaying distance moved after all glitches applied
if (glitch_current == -1):
m = mav.recv_match(type='VFR_HUD', blocking=True)
curr_pos = sim_location(mav)
moved_distance = get_distance(curr_pos, start_pos)
print("Alt: %u Moved: %.0f" % (m.alt, moved_distance))
if moved_distance > max_distance:
print("Moved over %u meters, Failed!" % max_distance)
success = False
# disable gps glitch
if glitch_current != -1:
glitch_current = -1
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
show_gps_and_sim_positions(mavproxy, False)
if success:
print("GPS glitch test passed! stayed within %u meters for %u seconds" % (max_distance, timeout))
else:
print("GPS glitch test FAILED!")
return success
# fly_gps_glitch_auto_test - fly mission and test reaction to gps glitch
def fly_gps_glitch_auto_test(mavproxy, mav, timeout=30, max_distance=100):
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# Fly mission #1
print("# Load copter_glitch_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_glitch_mission.txt")):
print("load copter_glitch_mission failed")
return False
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# wait until 100m from home
if not wait_distance(mav, 100, 5, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
# initialise current glitch
glitch_current = 0;
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while glitch_current < glitch_num:
tnow = get_sim_time(mav)
desired_glitch_num = int((tnow - tstart) * 2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# apply next glitch
if glitch_current < glitch_num:
print("Applying glitch %u" % glitch_current)
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# turn off glitching
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
# continue with the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# wait for arrival back home
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
while dist_to_home > 5:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
print("Dist from home: %u" % dist_to_home)
# turn off simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, False)
print("GPS Glitch test Auto completed: passed=%s" % ret)
return ret
#fly_simple - assumes the simple bearing is initialised to be directly north
# flies a box: south 50m, west for 8 seconds, north 25m, east for 8 seconds (matching the steps below)
def fly_simple(mavproxy, mav, side=50, timeout=120):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
#set SIMPLE mode for all flight modes
mavproxy.send('param set SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# fly south 50m
print("# Flying south %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly west 8 seconds
print("# Flying west for 8 seconds")
mavproxy.send('rc 2 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
# fly north 25 meters
print("# Flying north %u meters" % (side/2.0))
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side/2, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly east 8 seconds
print("# Flying east for 8 seconds")
mavproxy.send('rc 2 1700\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
#restore to default
mavproxy.send('param set SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_super_simple - flies a circle around home for 45 seconds
def fly_super_simple(mavproxy, mav, timeout=45):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# fly forward 20m
print("# Flying forward 20 meters")
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, 20, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
#set SUPER SIMPLE mode for all flight modes
mavproxy.send('param set SUPER_SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# start copter yawing slowly
mavproxy.send('rc 4 1550\n')
# roll left for timeout seconds
print("# rolling left from pilot's point of view for %u seconds" % timeout)
mavproxy.send('rc 1 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + timeout):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
# stop rolling and yawing
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 4 1500\n')
#restore simple mode parameters to default
mavproxy.send('param set SUPER_SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_circle - flies a circle with 20m radius
def fly_circle(mavproxy, mav, maxaltchange=10, holdtime=36):
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# face west
print("turn west")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 270):
return False
mavproxy.send('rc 4 1500\n')
#set CIRCLE radius
mavproxy.send('param set CIRCLE_RADIUS 3000\n')
# fly forward (east) at least 100m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 100):
return False
# return pitch stick back to middle
mavproxy.send('rc 2 1500\n')
# set CIRCLE mode
mavproxy.send('switch 1\n') # circle mode
wait_mode(mav, 'CIRCLE')
# wait
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Circle at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
print("heading %u" % m.heading)
print("CIRCLE OK for %u seconds" % holdtime)
return True
# fly_auto_test - fly mission which tests a significant number of commands
def fly_auto_test(mavproxy, mav):
# Fly mission #1
print("# Load copter_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_mission.txt")):
print("load copter_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("Auto mission completed: passed=%s" % ret)
return ret
# fly_avc_test - fly AVC mission
def fly_avc_test(mavproxy, mav):
# upload mission from file
print("# Load copter_AVC2013_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_AVC2013_mission.txt")):
print("load copter_AVC2013_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("Fly AVC mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("AVC mission completed: passed=%s" % ret)
return ret
def land(mavproxy, mav, timeout=60):
'''land the quad'''
print("STARTING LANDING")
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
print("Entered Landing Mode")
ret = wait_altitude(mav, -5, 1)
print("LANDING: ok= %s" % ret)
return ret
def fly_mission(mavproxy, mav, height_accuracy=-1, target_altitude=None):
'''fly a mission from a file'''
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
expect_msg = "Reached Command #%u" % (num_wp-1)
if (ret):
mavproxy.expect(expect_msg)
print("test: MISSION COMPLETE: passed=%s" % ret)
# wait here until ready
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
return ret
def load_mission_from_file(mavproxy, mav, filename):
'''Load a mission from a file to flight controller'''
global num_wp
mavproxy.send('wp load %s\n' % filename)
mavproxy.expect('flight plan received')
mavproxy.send('wp list\n')
mavproxy.expect('Requesting [0-9]+ waypoints')
# update num_wp
wploader = mavwp.MAVWPLoader()
wploader.load(filename)
num_wp = wploader.count()
return True
def save_mission_to_file(mavproxy, mav, filename):
global num_wp
mavproxy.send('wp save %s\n' % filename)
mavproxy.expect('Saved ([0-9]+) waypoints')
num_wp = int(mavproxy.match.group(1))
print("num_wp: %d" % num_wp)
return True
def setup_rc(mavproxy):
'''setup RC override control'''
for chan in range(1,9):
mavproxy.send('rc %u 1500\n' % chan)
# zero throttle
mavproxy.send('rc 3 1000\n')
def fly_ArduCopter(viewerip=None, map=False):
'''fly ArduCopter in SIL
you can pass viewerip as an IP address to optionally send fg and
mavproxy packets too for local viewing of the flight in real time
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sil = util.start_SIL('ArduCopter', wipe=True, model='+', home=home, speedup=speedup_default)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter')
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/copter_params.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
sil = util.start_SIL('ArduCopter', model='+', home=home, speedup=speedup_default)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/ArduCopter-test.tlog")
print("buildlog=%s" % buildlog)
copyTLog = False
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
print( "WARN: Failed to create symlink: " + logfile + " => " + buildlog + ", Will copy tlog manually to target location" )
copyTLog = True
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sil, mavproxy])
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
# wait 10sec to allow EKF to settle
wait_seconds(mav, 10)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a square in Stabilize mode
print("#")
print("########## Fly a square and save WPs with CH7 switch ##########")
print("#")
if not fly_square(mavproxy, mav):
failed_test_msg = "fly_square failed"
print(failed_test_msg)
failed = True
# save the stored mission to file
print("# Save out the CH7 mission to file")
if not save_mission_to_file(mavproxy, mav, os.path.join(testdir, "ch7_mission.txt")):
failed_test_msg = "save_mission_to_file failed"
print(failed_test_msg)
failed = True
# fly the stored mission
print("# Fly CH7 saved mission")
if not fly_mission(mavproxy, mav,height_accuracy = 0.5, target_altitude=10):
failed_test_msg = "fly ch7_mission failed"
print(failed_test_msg)
failed = True
# Throttle Failsafe
print("#")
print("########## Test Failsafe ##########")
print("#")
if not fly_throttle_failsafe(mavproxy, mav):
failed_test_msg = "fly_throttle_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Battery failsafe
if not fly_battery_failsafe(mavproxy, mav):
failed_test_msg = "fly_battery_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Stability patch
print("#")
print("########## Test Stability Patch ##########")
print("#")
if not fly_stability_patch(mavproxy, mav, 30):
failed_test_msg = "fly_stability_patch failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after stab patch failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fence test
print("#")
print("########## Test Horizontal Fence ##########")
print("#")
if not fly_fence_test(mavproxy, mav, 180):
failed_test_msg = "fly_fence_test failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch Loiter test
print("# GPS Glitch Loiter Test")
if not fly_gps_glitch_loiter_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_loiter_test failed"
print(failed_test_msg)
failed = True
# RTL after GPS Glitch Loiter test
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch test in auto mode
print("# GPS Glitch Auto Test")
if not fly_gps_glitch_auto_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_auto_test failed"
print(failed_test_msg)
failed = True
# take-off ahead of next test
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Loiter for 10 seconds
print("#")
print("########## Test Loiter for 10 seconds ##########")
print("#")
if not loiter(mavproxy, mav):
failed_test_msg = "loiter failed"
print(failed_test_msg)
failed = True
# Loiter Climb
print("#")
print("# Loiter - climb to 30m")
print("#")
if not change_alt(mavproxy, mav, 30):
failed_test_msg = "change_alt climb failed"
print(failed_test_msg)
failed = True
# Loiter Descend
print("#")
print("# Loiter - descend to 20m")
print("#")
if not change_alt(mavproxy, mav, 20):
failed_test_msg = "change_alt descend failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after Loiter climb/descend failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Simple mode
print("# Fly in SIMPLE mode")
if not fly_simple(mavproxy, mav):
failed_test_msg = "fly_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a circle in super simple mode
print("# Fly a circle in SUPER SIMPLE mode")
if not fly_super_simple(mavproxy, mav):
failed_test_msg = "fly_super_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after super simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Circle mode
print("# Fly CIRCLE mode")
if not fly_circle(mavproxy, mav):
failed_test_msg = "fly_circle failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after circle failed"
print(failed_test_msg)
failed = True
print("# Fly copter mission")
if not fly_auto_test(mavproxy, mav):
failed_test_msg = "fly_auto_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew copter mission OK")
# wait for disarm
mav.motors_disarmed_wait()
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/ArduCopter-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
if os.path.exists('ArduCopter-valgrind.log'):
os.chmod('ArduCopter-valgrind.log', 0644)
shutil.copy("ArduCopter-valgrind.log", util.reltopdir("../buildlogs/ArduCopter-valgrind.log"))
# [2014/05/07] FC Because I'm doing a cross machine build (source is on host, build is on guest VM) I cannot hard link
# This flag tells me that I need to copy the data out
if copyTLog:
shutil.copy(logfile, buildlog)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
def fly_CopterAVC(viewerip=None, map=False):
'''fly ArduCopter in SIL for AVC2013 mission
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
home = "%f,%f,%u,%u" % (AVCHOME.lat, AVCHOME.lng, AVCHOME.alt, AVCHOME.heading)
sil = util.start_SIL('ArduCopter', wipe=True, model='+', home=home, speedup=speedup_default)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter')
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/copter_AVC2013_params.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
sil = util.start_SIL('ArduCopter', model='+', home=home, speedup=speedup_default)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/CopterAVC-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sil, mavproxy])
if map:
mavproxy.send('map icon 40.072467969730496 -105.2314389590174\n')
mavproxy.send('map icon 40.072600990533829 -105.23146100342274\n')
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
# wait 10sec to allow EKF to settle
wait_seconds(mav, 10)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("# Fly AVC mission")
if not fly_avc_test(mavproxy, mav):
failed_test_msg = "fly_avc_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew AVC mission OK")
#mission includes disarm at end so should be ok to download logs now
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/CopterAVC-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
| gpl-3.0 |
pap/nupic | tests/unit/nupic/support/consoleprinter_test/consoleprinter_test.py | 34 | 2887 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import unittest2 as unittest
from nupic.support.consoleprinter import ConsolePrinterMixin, Tee
# Class used for testing
class MyClass(ConsolePrinterMixin):
def __init__(self):
ConsolePrinterMixin.__init__(self)
def run(self):
for i in xrange(0, 4):
self.cPrint(i, "message at level %d", i)
class ConsolePrinterTest(unittest.TestCase):
def testPrint(self):
mydir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.abspath("console_output.txt")
if os.path.exists(filename):
os.remove(filename)
# Capture output to a file so that we can compare it
with Tee(filename):
c1 = MyClass()
print "Running with default verbosity"
c1.run()
print
print "Running with verbosity 2"
c1.consolePrinterVerbosity = 2
c1.run()
print
print "Running with verbosity 0"
c1.consolePrinterVerbosity = 0
c1.run()
print
c1.cPrint(0, "Message %s two %s", "with", "args")
c1.cPrint(0, "Message with no newline", newline=False)
c1.cPrint(0, " Message with newline")
c1.cPrint(0, "Message with %s and %s",
"no newline", "args", newline=False)
c1.cPrint(0, " Message with %s and %s", "newline", "args")
print "Done"
with self.assertRaises(KeyError):
c1.cPrint(0, "Message", badkw="badvalue")
referenceFilename = os.path.join(mydir, "consoleprinter_output.txt")
expected = open(referenceFilename).readlines()
actual = open(filename).readlines()
print ("Comparing files '%s'" % referenceFilename)
print ("and '%s'" % filename)
self.assertEqual(len(expected), len(actual))
for i in xrange(len(expected)):
self.assertEqual(expected[i].strip(), actual[i].strip())
# Clean up
os.remove(filename)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
ritchyteam/odoo | addons/l10n_fr_hr_payroll/__openerp__.py | 374 | 2165 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'French Payroll',
'category': 'Localization/Payroll',
'author': 'Yannick Buron (SYNERPGY)',
'depends': ['hr_payroll', 'l10n_fr'],
'version': '1.0',
'description': """
French Payroll Rules.
=====================
- Configuration of hr_payroll for French localization
- All main contributions rules for French payslip, for 'cadre' and 'non-cadre'
- New payslip report
TODO:
-----
- Integration with holidays module for deduction and allowance
- Integration with hr_payroll_account for the automatic account_move_line
creation from the payslip
- Continue to integrate the contribution. Only the main contribution are
currently implemented
- Remake the report under webkit
- The payslip.line with appears_in_payslip = False should appears in the
payslip interface, but not in the payslip report
""",
'active': False,
'data': [
'l10n_fr_hr_payroll_view.xml',
'l10n_fr_hr_payroll_data.xml',
'views/report_l10nfrfichepaye.xml',
'l10n_fr_hr_payroll_reports.xml',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pgleeson/TestArea | templates/clusterUtils/pullallsims.py | 5 | 1714 | # -*- coding: utf-8 -*-
'''
This file can be placed in the simulations directory of a neuroConstruct project and
when run it will search in all subdirectories for time.dat, and if it doesn't find it,
will try running pullsim.sh, which will attempt to retrieve the saved data from a remotely
executed simulation
'''
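# Expected per-directory layout (illustrative):
#   <sim_dir>/time.dat    -> present once a simulation has finished successfully
#   <sim_dir>/pullsim.sh  -> script that fetches results from the remote host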
import os
import subprocess
path="."
pullSimFilename = "pullsim.sh"
dirList=os.listdir(path)
for fname in dirList:
if os.path.isdir(fname):
print "\n------ Checking directory: " + fname
timeFile = fname+"/time.dat"
pullsimFile = fname+"/"+pullSimFilename
if os.path.isfile(timeFile):
print "Time file exists! Simulation was successful."
else:
print "Time file doesn't exist!"
if os.path.isfile(pullsimFile):
print pullSimFilename+" exists and will be executed..."
process = subprocess.Popen("cd "+fname+";./"+pullSimFilename, shell=True, stdout=subprocess.PIPE)
stdout_value = process.communicate()[0]
process.wait()
print "Process has finished with return code: "+str(process.returncode)
output = repr(stdout_value)
formatted = output.replace("\\n", "\n\t")
print 'Output from running '+pullSimFilename+':\n\t', formatted
if os.path.isfile(timeFile):
print "Time file %s now exists, and so simulation was successful!"%timeFile
else:
print "Time file doesn't exist! Simulation hasn't successfully finished yet."
else:
print "No "+pullsimFile+", so cannot proceed further..."
| gpl-2.0 |
vitan/hue | desktop/core/ext-py/Django-1.6.10/docs/_ext/applyxrefs.py | 132 | 1842 | """Adds xref targets to the top of files."""
import sys
import os
testing = False
DONT_TOUCH = (
'./index.txt',
)
def target_name(fn):
if fn.endswith('.txt'):
fn = fn[:-4]
return '_' + fn.lstrip('./').replace('/', '-')
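# e.g. target_name('./ref/models.txt') -> '_ref-models'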
def process_file(fn, lines):
lines.insert(0, '\n')
lines.insert(0, '.. %s:\n' % target_name(fn))
try:
with open(fn, 'w') as fp:
fp.writelines(lines)
except IOError:
print("Can't open %s for writing. Not touching it." % fn)
def has_target(fn):
try:
with open(fn, 'r') as fp:
lines = fp.readlines()
except IOError:
print("Can't open or read %s. Not touching it." % fn)
return (True, None)
#print fn, len(lines)
if len(lines) < 1:
print("Not touching empty file %s." % fn)
return (True, None)
if lines[0].startswith('.. _'):
return (True, None)
return (False, lines)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
argv.extend('.')
files = []
for root in argv[1:]:
for (dirpath, dirnames, filenames) in os.walk(root):
files.extend([(dirpath, f) for f in filenames])
files.sort()
files = [os.path.join(p, fn) for p, fn in files if fn.endswith('.txt')]
#print files
for fn in files:
if fn in DONT_TOUCH:
print("Skipping blacklisted file %s." % fn)
continue
target_found, lines = has_target(fn)
if not target_found:
if testing:
print('%s: %s' % (fn, lines[0]))
else:
print("Adding xref to %s" % fn)
process_file(fn, lines)
else:
print("Skipping %s: already has a xref" % fn)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
gohin/django | django/http/cookie.py | 460 | 4390 | from __future__ import unicode_literals
import sys
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import http_cookies
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = http_cookies.SimpleCookie()
try:
_tc.load(str('foo:bar=1'))
_cookie_allows_colon_in_names = True
except http_cookies.CookieError:
_cookie_allows_colon_in_names = False
# Cookie pickling bug is fixed in Python 2.7.9 and Python 3.4.3+
# http://bugs.python.org/issue22775
cookie_pickles_properly = (
(sys.version_info[:2] == (2, 7) and sys.version_info >= (2, 7, 9)) or
sys.version_info >= (3, 4, 3)
)
if _cookie_encodes_correctly and _cookie_allows_colon_in_names and cookie_pickles_properly:
SimpleCookie = http_cookies.SimpleCookie
else:
Morsel = http_cookies.Morsel
class SimpleCookie(http_cookies.SimpleCookie):
if not cookie_pickles_properly:
def __setitem__(self, key, value):
# Apply the fix from http://bugs.python.org/issue22775 where
# it's not fixed in Python itself
if isinstance(value, Morsel):
# allow assignment of constructed Morsels (e.g. for pickling)
dict.__setitem__(self, key, value)
else:
super(SimpleCookie, self).__setitem__(key, value)
if not _cookie_encodes_correctly:
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(SimpleCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",", "\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
if not _cookie_allows_colon_in_names:
def load(self, rawdata):
self.bad_cookies = set()
if six.PY2 and isinstance(rawdata, six.text_type):
rawdata = force_str(rawdata)
super(SimpleCookie, self).load(rawdata)
for key in self.bad_cookies:
del self[key]
# override private __set() method:
# (needed for using our Morsel, and for laxness with CookieError
def _BaseCookie__set(self, key, real_value, coded_value):
key = force_str(key)
try:
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
except http_cookies.CookieError:
if not hasattr(self, 'bad_cookies'):
self.bad_cookies = set()
self.bad_cookies.add(key)
dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, http_cookies.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie)
except http_cookies.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
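# Illustrative usage of parse_cookie (example values, not part of the module):
#   parse_cookie('name=Dinsdale; fruit=spam')
#   # -> {'name': 'Dinsdale', 'fruit': 'spam'}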
| bsd-3-clause |
witcxc/libpinyin | scripts/pinyin.py | 2 | 13821 | # -*- coding: utf-8 -*-
# vim:set et sts=4 sw=4:
#
# libpinyin - Library to deal with pinyin.
#
# Copyright (c) 2007-2008 Peng Huang <[email protected]>
# Copyright (C) 2011 Peng Wu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
N_ = lambda x : x
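# N_ is the usual no-op gettext marker: it lets strings be collected for
# translation without being translated at import time.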
PINYIN_DICT = {
"a" : 1, "ai" : 2, "an" : 3, "ang" : 4, "ao" : 5,
"ba" : 6, "bai" : 7, "ban" : 8, "bang" : 9, "bao" : 10,
"bei" : 11, "ben" : 12, "beng" : 13, "bi" : 14, "bian" : 15,
"biao" : 16, "bie" : 17, "bin" : 18, "bing" : 19, "bo" : 20,
"bu" : 21, "ca" : 22, "cai" : 23, "can" : 24, "cang" : 25,
"cao" : 26, "ce" : 27, "cen" : 28, "ceng" : 29, "ci" : 30,
"cong" : 31, "cou" : 32, "cu" : 33, "cuan" : 34, "cui" : 35,
"cun" : 36, "cuo" : 37, "cha" : 38, "chai" : 39, "chan" : 40,
"chang" : 41, "chao" : 42, "che" : 43, "chen" : 44, "cheng" : 45,
"chi" : 46, "chong" : 47, "chou" : 48, "chu" : 49, "chuai" : 50,
"chuan" : 51, "chuang" : 52, "chui" : 53, "chun" : 54, "chuo" : 55,
"da" : 56, "dai" : 57, "dan" : 58, "dang" : 59, "dao" : 60,
"de" : 61, "dei" : 62,
# "den" : 63,
"deng" : 64, "di" : 65,
"dia" : 66, "dian" : 67, "diao" : 68, "die" : 69, "ding" : 70,
"diu" : 71, "dong" : 72, "dou" : 73, "du" : 74, "duan" : 75,
"dui" : 76, "dun" : 77, "duo" : 78, "e" : 79, "ei" : 80,
"en" : 81, "er" : 82, "fa" : 83, "fan" : 84, "fang" : 85,
"fei" : 86, "fen" : 87, "feng" : 88, "fo" : 89, "fou" : 90,
"fu" : 91, "ga" : 92, "gai" : 93, "gan" : 94, "gang" : 95,
"gao" : 96, "ge" : 97, "gei" : 98, "gen" : 99, "geng" : 100,
"gong" : 101, "gou" : 102, "gu" : 103, "gua" : 104, "guai" : 105,
"guan" : 106, "guang" : 107, "gui" : 108, "gun" : 109, "guo" : 110,
"ha" : 111, "hai" : 112, "han" : 113, "hang" : 114, "hao" : 115,
"he" : 116, "hei" : 117, "hen" : 118, "heng" : 119, "hong" : 120,
"hou" : 121, "hu" : 122, "hua" : 123, "huai" : 124, "huan" : 125,
"huang" : 126, "hui" : 127, "hun" : 128, "huo" : 129, "ji" : 130,
"jia" : 131, "jian" : 132, "jiang" : 133, "jiao" : 134, "jie" : 135,
"jin" : 136, "jing" : 137, "jiong" : 138, "jiu" : 139, "ju" : 140,
"juan" : 141, "jue" : 142, "jun" : 143, "ka" : 144, "kai" : 145,
"kan" : 146, "kang" : 147, "kao" : 148, "ke" : 149,
# "kei" : 150,
"ken" : 151, "keng" : 152, "kong" : 153, "kou" : 154, "ku" : 155,
"kua" : 156, "kuai" : 157, "kuan" : 158, "kuang" : 159, "kui" : 160,
"kun" : 161, "kuo" : 162, "la" : 163, "lai" : 164, "lan" : 165,
"lang" : 166, "lao" : 167, "le" : 168, "lei" : 169, "leng" : 170,
"li" : 171, "lia" : 172, "lian" : 173, "liang" : 174, "liao" : 175,
"lie" : 176, "lin" : 177, "ling" : 178, "liu" : 179,
"lo" : 180,
"long" : 181, "lou" : 182, "lu" : 183, "luan" : 184,
# "lue" : 185,
"lun" : 186, "luo" : 187, "lv" : 188, "lve" : 189,
"ma" : 190,
"mai" : 191, "man" : 192, "mang" : 193, "mao" : 194, "me" : 195,
"mei" : 196, "men" : 197, "meng" : 198, "mi" : 199, "mian" : 200,
"miao" : 201, "mie" : 202, "min" : 203, "ming" : 204, "miu" : 205,
"mo" : 206, "mou" : 207, "mu" : 208, "na" : 209, "nai" : 210,
"nan" : 211, "nang" : 212, "nao" : 213, "ne" : 214, "nei" : 215,
"nen" : 216, "neng" : 217, "ni" : 218, "nian" : 219, "niang" : 220,
"niao" : 221, "nie" : 222, "nin" : 223, "ning" : 224, "niu" : 225,
"ng" : 226,
"nong" : 227, "nou" : 228, "nu" : 229, "nuan" : 230,
# "nue" : 231,
"nuo" : 232, "nv" : 233, "nve" : 234,
"o" : 235,
"ou" : 236, "pa" : 237, "pai" : 238, "pan" : 239, "pang" : 240,
"pao" : 241, "pei" : 242, "pen" : 243, "peng" : 244, "pi" : 245,
"pian" : 246, "piao" : 247, "pie" : 248, "pin" : 249, "ping" : 250,
"po" : 251, "pou" : 252, "pu" : 253, "qi" : 254, "qia" : 255,
"qian" : 256, "qiang" : 257, "qiao" : 258, "qie" : 259, "qin" : 260,
"qing" : 261, "qiong" : 262, "qiu" : 263, "qu" : 264, "quan" : 265,
"que" : 266, "qun" : 267, "ran" : 268, "rang" : 269, "rao" : 270,
"re" : 271, "ren" : 272, "reng" : 273, "ri" : 274, "rong" : 275,
"rou" : 276, "ru" : 277, "ruan" : 278, "rui" : 279, "run" : 280,
"ruo" : 281, "sa" : 282, "sai" : 283, "san" : 284, "sang" : 285,
"sao" : 286, "se" : 287, "sen" : 288, "seng" : 289, "si" : 290,
"song" : 291, "sou" : 292, "su" : 293, "suan" : 294, "sui" : 295,
"sun" : 296, "suo" : 297, "sha" : 298, "shai" : 299, "shan" : 300,
"shang" : 301, "shao" : 302, "she" : 303, "shei" : 304, "shen" : 305,
"sheng" : 306, "shi" : 307, "shou" : 308, "shu" : 309, "shua" : 310,
"shuai" : 311, "shuan" : 312, "shuang" : 313, "shui" : 314, "shun" : 315,
"shuo" : 316, "ta" : 317, "tai" : 318, "tan" : 319, "tang" : 320,
"tao" : 321, "te" : 322,
# "tei" : 323,
"teng" : 324, "ti" : 325,
"tian" : 326, "tiao" : 327, "tie" : 328, "ting" : 329, "tong" : 330,
"tou" : 331, "tu" : 332, "tuan" : 333, "tui" : 334, "tun" : 335,
"tuo" : 336, "wa" : 337, "wai" : 338, "wan" : 339, "wang" : 340,
"wei" : 341, "wen" : 342, "weng" : 343, "wo" : 344, "wu" : 345,
"xi" : 346, "xia" : 347, "xian" : 348, "xiang" : 349, "xiao" : 350,
"xie" : 351, "xin" : 352, "xing" : 353, "xiong" : 354, "xiu" : 355,
"xu" : 356, "xuan" : 357, "xue" : 358, "xun" : 359, "ya" : 360,
"yan" : 361, "yang" : 362, "yao" : 363, "ye" : 364, "yi" : 365,
"yin" : 366, "ying" : 367, "yo" : 368, "yong" : 369, "you" : 370,
"yu" : 371, "yuan" : 372, "yue" : 373, "yun" : 374, "za" : 375,
"zai" : 376, "zan" : 377, "zang" : 378, "zao" : 379, "ze" : 380,
"zei" : 381, "zen" : 382, "zeng" : 383, "zi" : 384, "zong" : 385,
"zou" : 386, "zu" : 387, "zuan" : 388, "zui" : 389, "zun" : 390,
"zuo" : 391, "zha" : 392, "zhai" : 393, "zhan" : 394, "zhang" : 395,
"zhao" : 396, "zhe" : 397, "zhen" : 398, "zheng" : 399, "zhi" : 400,
"zhong" : 401, "zhou" : 402, "zhu" : 403, "zhua" : 404, "zhuai" : 405,
"zhuan" : 406, "zhuang" : 407, "zhui" : 408, "zhun" : 409, "zhuo" : 410,
# some weird pinyins
#~ "eng" : 411, "chua" : 412, "fe" : 413, "fiao" : 414, "liong" : 415
}
PINYIN_LIST = PINYIN_DICT.keys ()
SHENGMU_DICT = {
"b" : 1, "p" : 2, "m" : 3, "f" : 4, "d" : 5,
"t" : 6, "n" : 7, "l" : 8, "g" : 9, "k" : 10, "h" : 11,
"j" : 12, "q" : 13, "x" : 14, "zh" : 15, "ch" : 16, "sh" : 17,
"r" : 18, "z" : 19, "c" : 20, "s" : 21, "y" : 22, "w" : 23
}
SHENGMU_LIST = SHENGMU_DICT.keys ()
YUNMU_DICT = {
"a" : 1, "ai" : 2, "an" : 3, "ang" : 4, "ao" : 5,
"e" : 6, "ei" : 7, "en" : 8, "eng" : 9, "er" : 10,
"i" : 11, "ia" : 12, "ian" : 13, "iang" : 14, "iao" : 15,
"ie" : 16, "in" : 17, "ing" : 18, "iong" : 19, "iu" : 20,
"o" : 21, "ong" : 22, "ou" : 23, "u" : 24, "ua" : 25,
"uai" : 26, "uan" : 27, "uang" : 28, "ue" : 29, "ui" : 30,
"un" : 31, "uo" : 32, "v" : 33, "ve" : 34
}
YUNMU_LIST = YUNMU_DICT.keys ()
MOHU_SHENGMU = {
"z" : ("z", "zh"),
"zh" : ("z", "zh"),
"c" : ("c", "ch"),
"ch" : ("c", "ch"),
"s" : ("s", "sh"),
"sh" : ("s", "sh"),
"l" : ("l", "n"),
"n" : ("l", "n")
}
MOHU_YUNMU = {
"an" : ("an", "ang"),
"ang" : ("an", "ang"),
"en" : ("en", "eng"),
"eng" : ("en", "eng"),
"in" : ("in", "ing"),
"ing" : ("in", "ing")
}
MSPY_SHUANGPIN_SHENGMU_DICT = {
"b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "ch","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "sh","v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
MSPY_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ou",),
"c" : ("iao",),
"d" : ("uang", "iang"),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("ao",),
"l" : ("ai",),
"m" : ("ian",),
"n" : ("in",),
"o" : ("uo", "o"),
"p" : ("un",),
"q" : ("iu",),
"r" : ("uan", "er"),
"s" : ("ong", "iong"),
"t" : ("ue",),
"u" : ("u",),
"v" : ("ui","ue"),
"w" : ("ia","ua"),
"x" : ("ie",),
"y" : ("uai", "v"),
"z" : ("ei",),
";" : ("ing",)
}
ZRM_SHUANGPIN_SHENGMU_DICT = {
"b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "ch","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "sh","v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
ZRM_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ou",),
"c" : ("iao",),
"d" : ("uang", "iang"),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("ao",),
"l" : ("ai",),
"m" : ("ian",),
"n" : ("in",),
"o" : ("uo", "o"),
"p" : ("un",),
"q" : ("iu",),
"r" : ("uan", "er"),
"s" : ("ong", "iong"),
"t" : ("ue",),
"u" : ("u",),
"v" : ("ui","v"),
"w" : ("ia","ua"),
"x" : ("ie",),
"y" : ("uai", "ing"),
"z" : ("ei",),
}
ABC_SHUANGPIN_SHENGMU_DICT = {
"a" : "zh", "b" : "b", "c" : "c", "d" : "d", "e":"ch", "f" : "f", "g" : "g",
"h" : "h", "j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "v" : "sh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
ABC_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ou",),
"c" : ("in","uai"),
"d" : ("ia", "ua"),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("ao",),
"l" : ("ai",),
"m" : ("ue","ui"),
"n" : ("un",),
"o" : ("uo", "o"),
"p" : ("uan",),
"q" : ("ei",),
"r" : ("er", "iu"),
"s" : ("ong", "iong"),
"t" : ("iang","uang"),
"u" : ("u",),
"v" : ("v","ue"),
"w" : ("ian",),
"x" : ("ie",),
"y" : ("ing",),
"z" : ("iao",),
}
ZGPY_SHUANGPIN_SHENGMU_DICT = {
"a" : "ch", "b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "sh","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
ZGPY_SHUANGPIN_YUNMU_DICT = {
"a" : ("a", ),
"b" : ("iao", ),
"d" : ("ie", ),
"e" : ("e", ),
"f" : ("ian", ),
"g" : ("iang", "uang"),
"h" : ("ong", "iong"),
"i" : ("i", ),
"j" : ("er", "iu"),
"k" : ("ei", ),
"l" : ("uan", ),
"m" : ("un", ),
"n" : ("ue", "ui"),
"o" : ("uo", "o"),
"p" : ("ai", ),
"q" : ("ao", ),
"r" : ("an", ),
"s" : ("ang", ),
"t" : ("eng", "ng"),
"u" : ("u", ),
"v" : ("v", ),
"w" : ("en", ),
"x" : ("ia", "ua"),
"y" : ("in", "uai"),
"z" : ("ou" ,),
";" : ("ing", )
}
PYJJ_SHUANGPIN_SHENGMU_DICT = {
"a" : "'", "b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "sh","j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "ch","v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z"
}
PYJJ_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("ia","ua"),
"c" : ("uan",),
"d" : ("ao", ),
"e" : ("e",),
"f" : ("an",),
"g" : ("ang",),
"h" : ("iang","uang"),
"i" : ("i",),
"j" : ("ian",),
"k" : ("iao",),
"l" : ("in",),
"m" : ("ie",),
"n" : ("iu",),
"o" : ("uo", "o"),
"p" : ("ou",),
"q" : ("er","ing"),
"r" : ("en", ),
"s" : ("ai", ),
"t" : ("eng", "ng"),
"u" : ("u",),
"v" : ("v","ui"),
"w" : ("ei",),
"x" : ("uai","ue"),
"y" : ("ong","iong"),
"z" : ("un",),
}
XHE_SHUANGPIN_SHENGMU_DICT = {
"b" : "b", "c" : "c", "d" : "d", "f" : "f", "g" : "g",
"h" : "h", "i" : "ch", "j" : "j", "k" : "k", "l" : "l",
"m" : "m", "n" : "n", "o" : "'", "p" : "p", "q" : "q",
"r" : "r", "s" : "s", "t" : "t", "u" : "sh", "v" : "zh",
"w" : "w", "x" : "x", "y" : "y", "z" : "z",
"a" : "'", "e" : "'"
}
XHE_SHUANGPIN_YUNMU_DICT = {
"a" : ("a",),
"b" : ("in",),
"c" : ("ao",),
"d" : ("ai",),
"e" : ("e",),
"f" : ("en",),
"g" : ("eng", "ng"),
"h" : ("ang",),
"i" : ("i",),
"j" : ("an",),
"k" : ("uai", "ing"),
"l" : ("iang", "uang"),
"m" : ("ian",),
"n" : ("iao",),
"o" : ("uo", "o"),
"p" : ("ie",),
"q" : ("iu",),
"r" : ("uan", "er"),
"s" : ("ong", "iong"),
"t" : ("ue",),
"u" : ("u",),
"v" : ("v", "ui"),
"w" : ("ei",),
"x" : ("ia", "ua"),
"y" : ("un",),
"z" : ("ou",),
}
SHUANGPIN_SCHEMAS = {
N_("MSPY") : (MSPY_SHUANGPIN_SHENGMU_DICT, MSPY_SHUANGPIN_YUNMU_DICT),
N_("ZRM") : (ZRM_SHUANGPIN_SHENGMU_DICT, ZRM_SHUANGPIN_YUNMU_DICT),
N_("ABC") : (ABC_SHUANGPIN_SHENGMU_DICT, ABC_SHUANGPIN_YUNMU_DICT),
N_("ZGPY") : (ZGPY_SHUANGPIN_SHENGMU_DICT, ZGPY_SHUANGPIN_YUNMU_DICT),
N_("PYJJ") : (PYJJ_SHUANGPIN_SHENGMU_DICT, PYJJ_SHUANGPIN_YUNMU_DICT),
N_("XHE") : (XHE_SHUANGPIN_SHENGMU_DICT, XHE_SHUANGPIN_YUNMU_DICT),
}
| gpl-2.0 |
widgetpl/contrib | hack/verify-flags-underscore.py | 34 | 8924 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <[email protected]>
@author: Jorge Orpinel <[email protected]>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'Godeps' in dirs:
dirs.remove('Godeps')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
if 'vendor' in dirs:
dirs.remove('vendor')
for name in files:
if name.endswith(".svg"):
continue
if name.endswith(".gliffy"):
continue
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
newfiles = []
a = ['Godeps', 'vendor', 'third_party', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
results = flagre.findall(line)
for result in results:
if not "_" in result:
return False
# this should exclude many cases where jinja2 templates use kube flags
# as variables, except it uses _ for the variable name
if "{% set" + result + "= \"" in line:
return False
if "pillar[" + result + "]" in line:
return False
if "grains" + result in line:
return False
# These are usually yaml definitions
if result.endswith(":"):
return False
# something common in juju variables...
if "template_data[" + result + "]" in line:
return False
return True
return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
l = list(new_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
return list(flags)
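# Illustrative sketch (hypothetical flag name): a Go declaration such as
#     flag.String("cluster-name", "", "name of the cluster")
# is picked up by the regexes above as the flag "cluster-name", which must
# already appear in hack/verify-flags/known-flags.txt, otherwise get_flags()
# prints the new name and exits asking you to add it.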
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w${]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
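# Illustrative sketch: for a known flag "cluster-name", flags_to_re() contributes
# the pattern [^\w${]cluster[-_]name[^\w], so occurrences of either
# "--cluster-name" or "--cluster_name" match the combined regex; line_has_bad_flag()
# then reports only the underscore spelling.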
def load_exceptions(rootdir):
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
            print("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
# walk all the file looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
bad_lines.sort()
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
return 1
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
saimn/astropy | astropy/nddata/_testing.py | 11 | 1605 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing utilities. Not part of the public API!"""
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
def assert_wcs_seem_equal(wcs1, wcs2):
"""Just checks a few attributes to make sure wcs instances seem to be
equal.
"""
if wcs1 is None and wcs2 is None:
return
assert wcs1 is not None
assert wcs2 is not None
if isinstance(wcs1, BaseHighLevelWCS):
wcs1 = wcs1.low_level_wcs
if isinstance(wcs2, BaseHighLevelWCS):
wcs2 = wcs2.low_level_wcs
assert isinstance(wcs1, WCS)
assert isinstance(wcs2, WCS)
if wcs1 is wcs2:
return
assert wcs1.wcs.compare(wcs2.wcs)
def _create_wcs_simple(naxis, ctype, crpix, crval, cdelt):
wcs = WCS(naxis=naxis)
wcs.wcs.crpix = crpix
wcs.wcs.crval = crval
wcs.wcs.cdelt = cdelt
wcs.wcs.ctype = ctype
return wcs
def create_two_equal_wcs(naxis):
return [
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis),
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis)
]
def create_two_unequal_wcs(naxis):
return [
_create_wcs_simple(
naxis=naxis, ctype=["deg"]*naxis, crpix=[10]*naxis,
crval=[10]*naxis, cdelt=[1]*naxis),
_create_wcs_simple(
naxis=naxis, ctype=["m"]*naxis, crpix=[20]*naxis,
crval=[20]*naxis, cdelt=[2]*naxis),
]
| bsd-3-clause |
mascot6699/Hackapi-Demo | src/core/views.py | 1 | 1412 |
from . import models, serializers, utils
from datetime import datetime, timedelta
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, permissions
class Process(APIView):
"""
These apis are for general purpose
"""
def get(self, request, format=None):
"""
        GET endpoint called by Exotel when an SMS arrives; it dispatches on the keyword in the message body
SmsSid string, unique identifier of that SMS
From string, the number of the sender
To string, your Exotel Company number where the SMS was received
Date string, Time when the SMS reached Exotel's servers
Body string, the contents of the SMS
"""
parsed_content = request.query_params.get("Body").split(' ')
# garbage = parsed_content[0].lower()
        keyword = parsed_content[1].lower()
body = (" ".join(parsed_content[2:])).lower()
print body, keyword
if keyword=="hello":
body = utils.get_help()
if keyword=="wiki":
body = utils.process_wiki(body)
elif keyword=="dictionary":
body = utils.process_dictionary(body)
elif keyword=="email":
body = utils.custom_send_email(body)
elif keyword=="song":
body = utils.custom_send_email(body)
return Response(body, status=status.HTTP_200_OK, content_type="text/plain")
| mit |
altaf-ali/luigi | test/scheduler_visualisation_test.py | 49 | 13542 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import tempfile
import time
from helpers import unittest
import luigi
import luigi.notifications
import luigi.scheduler
import luigi.worker
luigi.notifications.DEBUG = True
tempdir = tempfile.mkdtemp()
class DummyTask(luigi.Task):
task_id = luigi.Parameter()
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(tempdir, str(self.task_id)))
class FactorTask(luigi.Task):
product = luigi.Parameter()
def requires(self):
for factor in range(2, self.product):
if self.product % factor == 0:
yield FactorTask(factor)
yield FactorTask(self.product // factor)
return
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(tempdir, 'luigi_test_factor_%d' % self.product))
class BadReqTask(luigi.Task):
succeed = luigi.BoolParameter()
def requires(self):
assert self.succeed
yield BadReqTask(False)
def run(self):
pass
def complete(self):
return False
class FailingTask(luigi.Task):
task_id = luigi.Parameter()
def run(self):
raise Exception("Error Message")
class SchedulerVisualisationTest(unittest.TestCase):
def setUp(self):
self.scheduler = luigi.scheduler.CentralPlannerScheduler()
def tearDown(self):
pass
def _assert_complete(self, tasks):
for t in tasks:
self.assert_(t.complete())
    def _build(self, tasks, workers=1):
        w = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=workers)
for t in tasks:
w.add(t)
w.run()
w.stop()
def _remote(self):
return self.scheduler
def _test_run(self, workers):
tasks = [DummyTask(i) for i in range(20)]
self._build(tasks, workers=workers)
self._assert_complete(tasks)
def test_graph(self):
start = time.time()
tasks = [DummyTask(task_id=1), DummyTask(task_id=2)]
self._build(tasks)
self._assert_complete(tasks)
end = time.time()
remote = self._remote()
graph = remote.graph()
self.assertEqual(len(graph), 2)
self.assert_(u'DummyTask(task_id=1)' in graph)
d1 = graph[u'DummyTask(task_id=1)']
self.assertEqual(d1[u'status'], u'DONE')
self.assertEqual(d1[u'deps'], [])
self.assertGreaterEqual(d1[u'start_time'], start)
self.assertLessEqual(d1[u'start_time'], end)
d2 = graph[u'DummyTask(task_id=2)']
self.assertEqual(d2[u'status'], u'DONE')
self.assertEqual(d2[u'deps'], [])
self.assertGreaterEqual(d2[u'start_time'], start)
self.assertLessEqual(d2[u'start_time'], end)
def _assert_all_done(self, tasks):
self._assert_all(tasks, u'DONE')
def _assert_all(self, tasks, status):
for task in tasks.values():
self.assertEqual(task[u'status'], status)
def test_dep_graph_single(self):
self._build([FactorTask(1)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=1)')
self.assertEqual(len(dep_graph), 1)
self._assert_all_done(dep_graph)
d1 = dep_graph.get(u'FactorTask(product=1)')
self.assertEqual(type(d1), type({}))
self.assertEqual(d1[u'deps'], [])
def test_dep_graph_not_found(self):
self._build([FactorTask(1)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=5)')
self.assertEqual(len(dep_graph), 0)
def test_dep_graph_tree(self):
self._build([FactorTask(30)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=30)')
self.assertEqual(len(dep_graph), 5)
self._assert_all_done(dep_graph)
d30 = dep_graph[u'FactorTask(product=30)']
self.assertEqual(sorted(d30[u'deps']), [u'FactorTask(product=15)', 'FactorTask(product=2)'])
d2 = dep_graph[u'FactorTask(product=2)']
self.assertEqual(sorted(d2[u'deps']), [])
d15 = dep_graph[u'FactorTask(product=15)']
self.assertEqual(sorted(d15[u'deps']), [u'FactorTask(product=3)', 'FactorTask(product=5)'])
d3 = dep_graph[u'FactorTask(product=3)']
self.assertEqual(sorted(d3[u'deps']), [])
d5 = dep_graph[u'FactorTask(product=5)']
self.assertEqual(sorted(d5[u'deps']), [])
def test_dep_graph_missing_deps(self):
self._build([BadReqTask(True)])
dep_graph = self._remote().dep_graph('BadReqTask(succeed=True)')
self.assertEqual(len(dep_graph), 2)
suc = dep_graph[u'BadReqTask(succeed=True)']
self.assertEqual(suc[u'deps'], [u'BadReqTask(succeed=False)'])
fail = dep_graph[u'BadReqTask(succeed=False)']
self.assertEqual(fail[u'name'], 'BadReqTask')
self.assertEqual(fail[u'params'], {'succeed': 'False'})
self.assertEqual(fail[u'status'], 'UNKNOWN')
def test_dep_graph_diamond(self):
self._build([FactorTask(12)])
remote = self._remote()
dep_graph = remote.dep_graph('FactorTask(product=12)')
self.assertEqual(len(dep_graph), 4)
self._assert_all_done(dep_graph)
d12 = dep_graph[u'FactorTask(product=12)']
self.assertEqual(sorted(d12[u'deps']), [u'FactorTask(product=2)', 'FactorTask(product=6)'])
d6 = dep_graph[u'FactorTask(product=6)']
self.assertEqual(sorted(d6[u'deps']), [u'FactorTask(product=2)', 'FactorTask(product=3)'])
d3 = dep_graph[u'FactorTask(product=3)']
self.assertEqual(sorted(d3[u'deps']), [])
d2 = dep_graph[u'FactorTask(product=2)']
self.assertEqual(sorted(d2[u'deps']), [])
def test_task_list_single(self):
self._build([FactorTask(7)])
remote = self._remote()
tasks_done = remote.task_list('DONE', '')
self.assertEqual(len(tasks_done), 1)
self._assert_all_done(tasks_done)
t7 = tasks_done.get(u'FactorTask(product=7)')
self.assertEqual(type(t7), type({}))
self.assertEqual(remote.task_list('', ''), tasks_done)
self.assertEqual(remote.task_list('FAILED', ''), {})
self.assertEqual(remote.task_list('PENDING', ''), {})
def test_task_list_failed(self):
self._build([FailingTask(8)])
remote = self._remote()
failed = remote.task_list('FAILED', '')
self.assertEqual(len(failed), 1)
f8 = failed.get(u'FailingTask(task_id=8)')
self.assertEqual(f8[u'status'], u'FAILED')
self.assertEqual(remote.task_list('DONE', ''), {})
self.assertEqual(remote.task_list('PENDING', ''), {})
def test_task_list_upstream_status(self):
class A(luigi.ExternalTask):
pass
class B(luigi.ExternalTask):
def complete(self):
return True
class C(luigi.Task):
def requires(self):
return [A(), B()]
class F(luigi.Task):
def run(self):
raise Exception()
class D(luigi.Task):
def requires(self):
return [F()]
class E(luigi.Task):
def requires(self):
return [C(), D()]
self._build([E()])
remote = self._remote()
done = remote.task_list('DONE', '')
self.assertEqual(len(done), 1)
db = done.get('B()')
self.assertEqual(db['status'], 'DONE')
missing_input = remote.task_list('PENDING', 'UPSTREAM_MISSING_INPUT')
self.assertEqual(len(missing_input), 2)
pa = missing_input.get(u'A()')
self.assertEqual(pa['status'], 'PENDING')
self.assertEqual(remote._upstream_status('A()', {}), 'UPSTREAM_MISSING_INPUT')
pc = missing_input.get(u'C()')
self.assertEqual(pc['status'], 'PENDING')
self.assertEqual(remote._upstream_status('C()', {}), 'UPSTREAM_MISSING_INPUT')
upstream_failed = remote.task_list('PENDING', 'UPSTREAM_FAILED')
self.assertEqual(len(upstream_failed), 2)
pe = upstream_failed.get(u'E()')
self.assertEqual(pe['status'], 'PENDING')
self.assertEqual(remote._upstream_status('E()', {}), 'UPSTREAM_FAILED')
pe = upstream_failed.get(u'D()')
self.assertEqual(pe['status'], 'PENDING')
self.assertEqual(remote._upstream_status('D()', {}), 'UPSTREAM_FAILED')
pending = dict(missing_input)
pending.update(upstream_failed)
self.assertEqual(remote.task_list('PENDING', ''), pending)
self.assertEqual(remote.task_list('PENDING', 'UPSTREAM_RUNNING'), {})
failed = remote.task_list('FAILED', '')
self.assertEqual(len(failed), 1)
fd = failed.get('F()')
self.assertEqual(fd['status'], 'FAILED')
all = dict(pending)
all.update(done)
all.update(failed)
self.assertEqual(remote.task_list('', ''), all)
self.assertEqual(remote.task_list('RUNNING', ''), {})
def test_task_search(self):
self._build([FactorTask(8)])
self._build([FailingTask(8)])
remote = self._remote()
all_tasks = remote.task_search('Task')
self.assertEqual(len(all_tasks), 2)
self._assert_all(all_tasks['DONE'], 'DONE')
self._assert_all(all_tasks['FAILED'], 'FAILED')
def test_fetch_error(self):
self._build([FailingTask(8)])
remote = self._remote()
error = remote.fetch_error("FailingTask(task_id=8)")
self.assertEqual(error["taskId"], "FailingTask(task_id=8)")
self.assertTrue("Error Message" in error["error"])
self.assertTrue("Runtime error" in error["error"])
self.assertTrue("Traceback" in error["error"])
def test_inverse_deps(self):
class X(luigi.Task):
pass
class Y(luigi.Task):
def requires(self):
return [X()]
class Z(luigi.Task):
id = luigi.Parameter()
def requires(self):
return [Y()]
class ZZ(luigi.Task):
def requires(self):
return [Z(1), Z(2)]
self._build([ZZ()])
dep_graph = self._remote().inverse_dep_graph('X()')
def assert_has_deps(task_id, deps):
self.assertTrue(task_id in dep_graph, '%s not in dep_graph %s' % (task_id, dep_graph))
task = dep_graph[task_id]
self.assertEqual(sorted(task['deps']), sorted(deps), '%s does not have deps %s' % (task_id, deps))
assert_has_deps('X()', ['Y()'])
assert_has_deps('Y()', ['Z(id=1)', 'Z(id=2)'])
assert_has_deps('Z(id=1)', ['ZZ()'])
assert_has_deps('Z(id=2)', ['ZZ()'])
assert_has_deps('ZZ()', [])
def test_simple_worker_list(self):
class X(luigi.Task):
def run(self):
self._complete = True
def complete(self):
return getattr(self, '_complete', False)
self._build([X()])
workers = self._remote().worker_list()
self.assertEqual(1, len(workers))
worker = workers[0]
self.assertEqual('X()', worker['first_task'])
self.assertEqual(0, worker['num_pending'])
self.assertEqual(0, worker['num_uniques'])
self.assertEqual(0, worker['num_running'])
self.assertEqual(1, worker['workers'])
def test_worker_list_pending_uniques(self):
class X(luigi.Task):
def complete(self):
return False
class Y(X):
def requires(self):
return X()
class Z(Y):
pass
w1 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
w2 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
w1.add(Y())
w2.add(Z())
workers = self._remote().worker_list()
self.assertEqual(2, len(workers))
for worker in workers:
self.assertEqual(2, worker['num_pending'])
self.assertEqual(1, worker['num_uniques'])
self.assertEqual(0, worker['num_running'])
def test_worker_list_running(self):
class X(luigi.Task):
n = luigi.IntParameter()
w = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=3)
w.add(X(0))
w.add(X(1))
w.add(X(2))
w.add(X(3))
w._get_work()
w._get_work()
w._get_work()
workers = self._remote().worker_list()
self.assertEqual(1, len(workers))
worker = workers[0]
self.assertEqual(3, worker['num_running'])
self.assertEqual(1, worker['num_pending'])
self.assertEqual(1, worker['num_uniques'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
uwevil/namebench | libnamebench/selectors.py | 176 | 3262 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ways to select hostname records to test."""
import math
import random
# When running a weighted distribution, never repeat a domain more than this:
MAX_REPEAT = 3
TYPES = {
'automatic': 'Pick the most appropriate selector type for the data source',
'weighted': 'Chooses based on a weighted distribution, preferring entries in the top of the list',
'random': 'Random selection, including repeats.',
'chunk': 'Chooses a random contiguous segment of entries'
}
def MaxRepeatCount(elements, count):
# Avoid stalling out looking for the one unique choice
if count >= len(elements) * 0.5:
return 2**32
else:
return MAX_REPEAT
def GetTypes():
"""Return a tuple of type names with a description."""
return sorted(TYPES.keys())
def WeightedDistribution(elements, count):
"""Given a set of elements, return a weighted distribution back.
Args:
elements: A list of elements to choose from
count: how many elements to return
Returns:
A random but fairly distributed list of elements of count count.
The distribution is designed to mimic real-world DNS usage. The observed
formula for request popularity was:
522.520776 * math.pow(x, -0.998506)-2
"""
def FindY(x, total):
return total * math.pow(x, -0.408506)
total = len(elements)
picks = []
picked = {}
offset = FindY(total, total)
max_repeat = MaxRepeatCount(elements, count)
attempts = 0
while len(picks) < count:
attempts += 1
# avoid dead-lock
if attempts > (count * 4):
break
x = random.random() * total
y = FindY(x, total) - offset
index = abs(int(y))
if index < total:
if picked.get(index, 0) < max_repeat:
picks.append(elements[index])
picked[index] = picked.get(index, 0) + 1
# print '%s: %s' % (index, elements[index])
return picks
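# Illustrative usage (hypothetical hostnames): drawing 3 entries from a ranked
# list; names near the head of the list are picked far more often than the tail,
# and no single name repeats more often than MaxRepeatCount() allows.
#   hosts = ['google.com', 'facebook.com', 'wikipedia.org', 'rarely-seen.example']
#   sample = WeightedDistribution(hosts, 3)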
def ChunkSelect(elements, count):
"""Return a random count-sized contiguous chunk of elements."""
if len(elements) <= count:
return elements
start = random.randint(0, len(elements) - count)
return elements[start:start + count]
def RandomSelect(elements, count, include_duplicates=False):
"""Randomly select elements, but enforce duplication limits."""
picks = []
picked = {}
if include_duplicates:
max_repeat = 2**32
else:
max_repeat = MaxRepeatCount(elements, count)
attempts = 0
while len(picks) < count:
attempts += 1
# avoid dead-lock
if attempts > (count * 4):
break
choice = random.choice(elements)
if picked.get(choice, 0) < max_repeat:
picks.append(choice)
picked[choice] = picked.get(choice, 0) + 1
return picks
| apache-2.0 |
TiVoMaker/boto | tests/integration/ec2/autoscale/test_cert_verification.py | 126 | 1575 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.ec2.autoscale
class AutoscaleCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
autoscale = True
regions = boto.ec2.autoscale.regions()
def sample_service_call(self, conn):
conn.get_all_groups()
| mit |
asutherland/opc-reviewboard | contrib/internal/release.py | 1 | 1785 | #!/usr/bin/env python
#
# Performs a release of Review Board. This can only be run by the core
# developers with release permissions.
#
import os
import re
import subprocess
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from reviewboard import get_package_version, VERSION
PY_VERSIONS = ["2.4", "2.5", "2.6"]
LATEST_PY_VERSION = PY_VERSIONS[-1]
PACKAGE_NAME = 'ReviewBoard'
RELEASES_URL = \
'reviewboard.org:/var/www/downloads.reviewboard.org/' \
'htdocs/releases/%s/%s.%s/' % (PACKAGE_NAME, VERSION[0], VERSION[1])
built_files = []
def execute(cmdline):
print ">>> %s" % cmdline
if os.system(cmdline) != 0:
print "!!! Error invoking command."
sys.exit(1)
def run_setup(target, pyver = LATEST_PY_VERSION):
execute("python%s ./setup.py release %s" % (pyver, target))
def clean():
execute("rm -rf build dist")
def build_targets():
for pyver in PY_VERSIONS:
run_setup("bdist_egg", pyver)
built_files.append("dist/%s-%s-py%s.egg" %
(PACKAGE_NAME, get_package_version(), pyver))
run_setup("sdist")
built_files.append("dist/%s-%s.tar.gz" %
(PACKAGE_NAME, get_package_version()))
def upload_files():
execute("scp %s %s" % (" ".join(built_files), RELEASES_URL))
def tag_release():
execute("git tag release-%s" % get_package_version())
def register_release():
run_setup("register")
def main():
if not os.path.exists("setup.py"):
sys.stderr.write("This must be run from the root of the "
"Review Board tree.\n")
sys.exit(1)
clean()
build_targets()
upload_files()
tag_release()
register_release()
if __name__ == "__main__":
main()
| mit |
frenkowski/Tyrannus_Kernel_MM_SM-G925F | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
sliz1/servo | tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/support/generate-text-emphasis-line-height-tests.py | 829 | 3431 | #!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'
TAGS = [
# (tag, start, end)
('div', '<div style="{style1}{style2}">', '</div>'),
('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
# pos, text-emphasis-position, ruby-position,
# writing-modes, dir text
('top', 'over right', 'over',
['horizontal-tb'], 'below'),
('bottom', 'under right', 'under',
['horizontal-tb'], 'over'),
('right', 'over right', 'over',
['vertical-rl', 'vertical-lr'], 'to the left of'),
('left', 'over left', 'under',
['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
idx += 1
ref_file = REF_FILE.format(idx)
content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
write_file(ref_file, content)
suffix = iter(string.ascii_lowercase)
for wm in wms:
style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
for (tag, start, end) in TAGS:
test_file = TEST_FILE.format(idx, next(suffix))
content = TEST_TEMPLATE.format(
pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
start=start.format(style1=style1, style2=STYLE2), end=end)
write_file(test_file, content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
| mpl-2.0 |
tordans/volunteer_planner | scheduler/management/commands/calculate_volunteer_hours.py | 3 | 1164 | # coding: utf-8
import datetime
from django.core.management.base import BaseCommand
from registration.models import RegistrationProfile
from stats.models import ValueStore
class Command(BaseCommand):
help = 'creates bulk shifts from existing data'
args = ""
option_list = BaseCommand.option_list
def handle(self, *args, **options):
shifts = RegistrationProfile.objects.all() \
.filter(needs__starting_time__lte=datetime.datetime.now()) \
.only('needs__starting_time', 'needs__ending_time')
# .prefetch_related('needs')
total_seconds = 0.0
for shift in shifts:
needs_in_shift = shift.needs.all()
for single_shift in needs_in_shift:
delta = single_shift.ending_time - single_shift.starting_time
total_seconds += delta.total_seconds()
total_hours = int(total_seconds) / 3600
value_object, created = ValueStore.objects.get_or_create(
name="total-volunteer-hours", defaults=dict(value=total_hours))
if not created:
value_object.value = total_hours
value_object.save()
| agpl-3.0 |
syedjafri/ThinkStats2 | code/chap02soln.py | 69 | 2263 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
from operator import itemgetter
import first
import thinkstats2
def Mode(hist):
"""Returns the value with the highest frequency.
hist: Hist object
returns: value from Hist
"""
p, x = max([(p, x) for x, p in hist.Items()])
return x
def AllModes(hist):
"""Returns value-freq pairs in decreasing order of frequency.
hist: Hist object
returns: iterator of value-freq pairs
"""
return sorted(hist.Items(), key=itemgetter(1), reverse=True)
def WeightDifference(live, firsts, others):
"""Explore the difference in weight between first babies and others.
live: DataFrame of all live births
firsts: DataFrame of first babies
others: DataFrame of others
"""
mean0 = live.totalwgt_lb.mean()
mean1 = firsts.totalwgt_lb.mean()
mean2 = others.totalwgt_lb.mean()
var1 = firsts.totalwgt_lb.var()
var2 = others.totalwgt_lb.var()
print('Mean')
print('First babies', mean1)
print('Others', mean2)
print('Variance')
print('First babies', var1)
print('Others', var2)
print('Difference in lbs', mean1 - mean2)
print('Difference in oz', (mean1 - mean2) * 16)
print('Difference relative to mean (%age points)',
(mean1 - mean2) / mean0 * 100)
d = thinkstats2.CohenEffectSize(firsts.totalwgt_lb, others.totalwgt_lb)
print('Cohen d', d)
def main(script):
"""Tests the functions in this module.
script: string script name
"""
live, firsts, others = first.MakeFrames()
hist = thinkstats2.Hist(live.prglngth)
# explore the weight difference between first babies and others
WeightDifference(live, firsts, others)
# test Mode
mode = Mode(hist)
print('Mode of preg length', mode)
assert(mode == 39)
# test AllModes
modes = AllModes(hist)
assert(modes[0][1] == 4693)
for value, freq in modes[:5]:
print(value, freq)
print('%s: All tests passed.' % script)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
guilhermegm/django-tastypie | tests/core/tests/paginator.py | 25 | 11701 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase
from tastypie.exceptions import BadRequest
from tastypie.paginator import Paginator
from core.models import Note
from core.tests.resources import NoteResource
from django.db import reset_queries
from django.http import QueryDict
class PaginatorTestCase(TestCase):
fixtures = ['note_testdata.json']
def setUp(self):
super(PaginatorTestCase, self).setUp()
self.data_set = Note.objects.all()
self.old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = self.old_debug
super(PaginatorTestCase, self).tearDown()
def _get_query_count(self):
try:
from django.db import connections
return connections['default'].queries
except ImportError:
from django.db import connection
return connection.queries
def test_page1(self):
reset_queries()
self.assertEqual(len(self._get_query_count()), 0)
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
# REGRESSION: Check to make sure only part of the cache is full.
# We used to run ``len()`` on the ``QuerySet``, which would populate
# the entire result set. Owwie.
paginator.get_count()
self.assertEqual(len(self._get_query_count()), 1)
# Should be nothing in the cache.
self.assertEqual(paginator.objects._result_cache, None)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=2' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page2(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page3(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=4)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 4)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=2' in meta['previous'])
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_page2_with_request(self):
for req in [{'offset' : '2', 'limit' : '2'}, QueryDict('offset=2&limit=2')]:
paginator = Paginator(req, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page3_with_request(self):
for req in [{'offset' : '4', 'limit' : '2'}, QueryDict('offset=4&limit=2')]:
paginator = Paginator(req, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=4)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 4)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=2' in meta['previous'])
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_large_limit(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=20, offset=0)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 20)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_all(self):
paginator = Paginator({'limit': 0}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
page = paginator.page()
meta = page['meta']
self.assertEqual(meta['limit'], 1000)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['total_count'], 6)
self.assertEqual(len(page['objects']), 6)
def test_complex_get(self):
request = {
'slug__startswith': 'food',
'format': 'json',
}
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=food' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=food' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_limit(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.limit = '10'
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = None
self.assertEqual(paginator.get_limit(), 20)
paginator.limit = 10
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = -10
raised = False
try:
paginator.get_limit()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid limit '-10' provided. Please provide a positive integer >= 0.")
paginator.limit = 'hAI!'
raised = False
try:
paginator.get_limit()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid limit 'hAI!' provided. Please provide a positive integer.")
# Test the max_limit.
paginator.limit = 1000
self.assertEqual(paginator.get_limit(), 1000)
paginator.limit = 1001
self.assertEqual(paginator.get_limit(), 1000)
paginator = Paginator({}, self.data_set, limit=20, offset=0, max_limit=10)
self.assertEqual(paginator.get_limit(), 10)
def test_offset(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.offset = '10'
self.assertEqual(paginator.get_offset(), 10)
paginator.offset = 0
self.assertEqual(paginator.get_offset(), 0)
paginator.offset = 10
self.assertEqual(paginator.get_offset(), 10)
        paginator.offset = -10
raised = False
try:
paginator.get_offset()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid offset '-10' provided. Please provide a positive integer >= 0.")
paginator.offset = 'hAI!'
raised = False
try:
paginator.get_offset()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid offset 'hAI!' provided. Please provide an integer.")
def test_regression_nonqueryset(self):
paginator = Paginator({}, ['foo', 'bar', 'baz'], limit=2, offset=0)
# This would fail due to ``count`` being present on ``list`` but called
# differently.
page = paginator.page()
self.assertEqual(page['objects'], ['foo', 'bar'])
def test_unicode_request(self):
request = {
'slug__startswith': u'☃',
'format': 'json',
}
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
request = QueryDict('slug__startswith=☃&format=json')
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_custom_collection_name(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=20, offset=0, collection_name='notes')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 20)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
self.assertEqual(len(paginator.page()['notes']), 6)
def test_multiple(self):
request = QueryDict('a=1&a=2')
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('a=1' in meta['previous'])
self.assertTrue('a=2' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('a=1' in meta['next'])
self.assertTrue('a=2' in meta['next'])
def test_max_limit(self):
paginator = Paginator({'limit': 0}, self.data_set, max_limit=10,
resource_uri='/api/v1/notes/')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 10)
def test_max_limit_none(self):
paginator = Paginator({'limit': 0}, self.data_set, max_limit=None,
resource_uri='/api/v1/notes/')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 0)
| bsd-3-clause |
wroersma/volatility | volatility/scan.py | 14 | 6848 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# Derived from source in PyFlag developed by:
# Copyright 2004: Commonwealth of Australia.
# Michael Cohen <[email protected]>
# David Collett <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
# Special thanks to Michael Cohen for ideas and comments!
#
#pylint: disable-msg=C0111
"""
@author: AAron Walters
@license: GNU General Public License 2.0
@contact: [email protected]
@organization: Volatility Foundation
"""
import volatility.debug as debug
import volatility.registry as registry
import volatility.addrspace as addrspace
import volatility.constants as constants
import volatility.conf as conf
########### Following is the new implementation of the scanning
########### framework. The old framework was based on PyFlag's
########### scanning framework which is probably too complex for this.
class BaseScanner(object):
""" A more thorough scanner which checks every byte """
checks = []
def __init__(self, window_size = 8):
self.buffer = addrspace.BufferAddressSpace(conf.DummyConfig(), data = '\x00' * 1024)
self.window_size = window_size
self.constraints = []
self.error_count = 0
def check_addr(self, found):
""" This calls all our constraints on the offset found and
returns the number of contraints that matched.
We shortcut the loop as soon as its obvious that there will
not be sufficient matches to fit the criteria. This allows for
an early exit and a speed boost.
"""
cnt = 0
for check in self.constraints:
## constraints can raise for an error
try:
val = check.check(found)
except Exception:
debug.b()
val = False
if not val:
cnt = cnt + 1
if cnt > self.error_count:
return False
return True
overlap = 20
def scan(self, address_space, offset = 0, maxlen = None):
self.buffer.profile = address_space.profile
current_offset = offset
## Build our constraints from the specified ScannerCheck
## classes:
self.constraints = []
for class_name, args in self.checks:
check = registry.get_plugin_classes(ScannerCheck)[class_name](self.buffer, **args)
self.constraints.append(check)
## Which checks also have skippers?
skippers = [ c for c in self.constraints if hasattr(c, "skip") ]
for (range_start, range_size) in sorted(address_space.get_available_addresses()):
# Jump to the next available point to scan from
            # current_offset jumps up to be at least range_start
current_offset = max(range_start, current_offset)
range_end = range_start + range_size
# If we have a maximum length, we make sure it's less than the range_end
if maxlen:
range_end = min(range_end, offset + maxlen)
while (current_offset < range_end):
                # We've now got range_start <= current_offset < range_end
# Figure out how much data to read
l = min(constants.SCAN_BLOCKSIZE + self.overlap, range_end - current_offset)
# Populate the buffer with data
# We use zread to scan what we can because there are often invalid
# pages in the DTB
data = address_space.zread(current_offset, l)
self.buffer.assign_buffer(data, current_offset)
## Run checks throughout this block of data
i = 0
while i < l:
if self.check_addr(i + current_offset):
## yield the offset to the start of the memory
## (after the pool tag)
yield i + current_offset
## Where should we go next? By default we go 1 byte
## ahead, but if some of the checkers have skippers,
## we may actually go much farther. Checkers with
## skippers basically tell us that there is no way
## they can match anything before the skipped result,
## so there is no point in trying them on all the data
## in between. This optimization is useful to really
## speed things up. FIXME - currently skippers assume
## that the check must match, therefore we can skip
                    ## the unmatchable region, but it's possible that a
## scanner needs to match only some checkers.
skip = 1
for s in skippers:
skip = max(skip, s.skip(data, i))
i += skip
current_offset += min(constants.SCAN_BLOCKSIZE, l)
class DiscontigScanner(BaseScanner):
def scan(self, address_space, offset = 0, maxlen = None):
debug.warning("DiscontigScanner has been deprecated, all functionality is now contained in BaseScanner")
for match in BaseScanner.scan(self, address_space, offset, maxlen):
yield match
class ScannerCheck(object):
""" A scanner check is a special class which is invoked on an AS to check for a specific condition.
The main method is def check(self, offset):
This will return True if the condition is true or False otherwise.
This class is the base class for all checks.
"""
def __init__(self, address_space, **_kwargs):
self.address_space = address_space
def object_offset(self, offset, address_space):
return offset
def check(self, _offset):
return False
## If you want to speed up the scanning define this method - it
## will be used to skip the data which is obviously not going to
## match. You will need to return the number of bytes from offset
## to skip to. We take the maximum number of bytes to guarantee
## that all checks have a chance of passing.
#def skip(self, data, offset):
# return -1
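## Illustrative sketch (not part of upstream Volatility): the rough shape of a
## concrete ScannerCheck that provides both check() and skip(). The class name
## and the "tag" argument are hypothetical, and the plugin-registry setup that
## lets BaseScanner resolve checks by name is omitted, so the code is left
## commented out rather than registered as a real check.
#class ExampleTagCheck(ScannerCheck):
#    """ Matches a fixed byte string at the scanned offset. """
#    def __init__(self, address_space, tag = None, **kwargs):
#        ScannerCheck.__init__(self, address_space, **kwargs)
#        self.tag = tag
#    def check(self, offset):
#        data = self.address_space.read(offset, len(self.tag))
#        return data == self.tag
#    def skip(self, data, offset):
#        ## Jump straight to the next candidate match in this buffer, or past
#        ## the end of the buffer if the tag does not occur again.
#        nextval = data.find(self.tag, offset + 1)
#        if nextval < 0:
#            return len(data) - offset
#        return nextval - offset
#class ExampleScanner(BaseScanner):
#    checks = [ ("ExampleTagCheck", dict(tag = "Obj\x01")) ]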
| gpl-2.0 |
hasecbinusr/pysal | pysal/esda/tests/test_geary.py | 5 | 2997 | """Geary Unittest."""
import unittest
from ... import open as popen
from ... import examples
from .. import geary
import numpy as np
from ...common import pandas
PANDAS_EXTINCT = pandas is None
class Geary_Tester(unittest.TestCase):
"""Geary class for unit tests."""
def setUp(self):
self.w = popen(examples.get_path("book.gal")).read()
f = popen(examples.get_path("book.txt"))
self.y = np.array(f.by_col['y'])
def test_Geary(self):
c = geary.Geary(self.y, self.w, permutations=0)
self.assertAlmostEquals(c.C, 0.33301083591331254)
self.assertAlmostEquals(c.EC, 1.0)
self.assertAlmostEquals(c.VC_norm, 0.031805300245097874)
self.assertAlmostEquals(c.p_norm, 9.2018240680169505e-05)
self.assertAlmostEquals(c.z_norm, -3.7399778367629564)
self.assertAlmostEquals(c.seC_norm, 0.17834040553138225)
self.assertAlmostEquals(c.VC_rand, 0.018437747611029367)
self.assertAlmostEquals(c.p_rand, 4.5059156794646782e-07)
self.assertAlmostEquals(c.z_rand, -4.9120733751216008)
self.assertAlmostEquals(c.seC_rand, 0.13578566791465646)
np.random.seed(12345)
c = geary.Geary(self.y, self.w, permutations=999)
self.assertAlmostEquals(c.C, 0.33301083591331254)
self.assertAlmostEquals(c.EC, 1.0)
self.assertAlmostEquals(c.VC_norm, 0.031805300245097874)
self.assertAlmostEquals(c.p_norm, 9.2018240680169505e-05)
self.assertAlmostEquals(c.z_norm, -3.7399778367629564)
self.assertAlmostEquals(c.seC_norm, 0.17834040553138225)
self.assertAlmostEquals(c.VC_rand, 0.018437747611029367)
self.assertAlmostEquals(c.p_rand, 4.5059156794646782e-07)
self.assertAlmostEquals(c.z_rand, -4.9120733751216008)
self.assertAlmostEquals(c.seC_rand, 0.13578566791465646)
self.assertAlmostEquals(c.EC_sim, 0.9980676303238214)
self.assertAlmostEquals(c.VC_sim, 0.034430408799858946)
self.assertAlmostEquals(c.p_sim, 0.001)
self.assertAlmostEquals(c.p_z_sim, 0.00016908100514811952)
self.assertAlmostEquals(c.z_sim, -3.5841621159171746)
self.assertAlmostEquals(c.seC_sim, 0.18555432843202269)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
r1 = geary.Geary.by_col(df, ['y'], w=self.w, permutations=999)
this_geary = np.unique(r1.y_geary.values)
this_pval = np.unique(r1.y_p_sim.values)
np.random.seed(12345)
c = geary.Geary(self.y, self.w, permutations=999)
self.assertAlmostEquals(this_geary, c.C)
self.assertAlmostEquals(this_pval, c.p_sim)
suite = unittest.TestSuite()
test_classes = [Geary_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
peterbarker/ardupilot-1 | Tools/LogAnalyzer/DataflashLog.py | 17 | 29790 | #
# Code to abstract the parsing of APM Dataflash log files, currently only used by the LogAnalyzer
#
# Initial code by Andrew Chapman ([email protected]), 16th Jan 2014
#
from __future__ import print_function
import collections
import os
import numpy
import bisect
import sys
import ctypes
from VehicleType import VehicleType, VehicleTypeString
class Format(object):
'''Data channel format as specified by the FMT lines in the log file'''
def __init__(self,msgType,msgLen,name,types,labels):
self.NAME = 'FMT'
self.msgType = msgType
self.msgLen = msgLen
self.name = name
self.types = types
self.labels = labels.split(',')
def __str__(self):
return "%8s %s" % (self.name, `self.labels`)
@staticmethod
def trycastToFormatType(value,valueType):
        '''using format characters from libraries/DataFlash/DataFlash.h to cast strings to basic python int/float/string types
        tries a cast; if it fails the original value is returned unchanged, which is acceptable when the text log does not match the format, e.g. MODE is expected to be int'''
try:
if valueType in "fcCeELd":
return float(value)
elif valueType in "bBhHiIMQq":
return int(value)
elif valueType in "nNZ":
return str(value)
except:
pass
return value
def to_class(self):
members = dict(
NAME = self.name,
labels = self.labels[:],
)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels[:]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
p = property(lambda x:getattr(x, attributename),
lambda x, v:setattr(x,attributename, Format.trycastToFormatType(v,format)))
members[propertyname] = p
members[attributename] = None
createproperty(label, _type)
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,'_'+k)) for k in x.labels]))
def init(a, *x):
if len(x) != len(a.labels):
raise ValueError("Invalid Length")
#print(list(zip(a.labels, x)))
for (l,v) in zip(a.labels, x):
try:
setattr(a, l, v)
except Exception as e:
print("{} {} {} failed".format(a,l,v))
print(e)
members['__init__'] = init
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(object,),
members
)
#print(members)
return cls
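# Illustrative sketch (not part of the upstream module): how a text-log FMT
# definition becomes a per-message class via Format.to_class(). The message
# name, types and labels below are hypothetical, chosen only to show the
# string-to-number casting performed by trycastToFormatType().
# >>> fmt = Format(msgType=129, msgLen=23, name='DEMO', types='ffI', labels='Roll,Pitch,TimeMS')
# >>> Demo = fmt.to_class()
# >>> e = Demo('1.5', '-0.25', '123456')   # tokens arrive as strings from the log line
# >>> e.Roll, e.TimeMS                     # cast to float / int per the format characters
# (1.5, 123456)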
class logheader(ctypes.LittleEndianStructure):
_fields_ = [ \
('head1', ctypes.c_uint8),
('head2', ctypes.c_uint8),
('msgid', ctypes.c_uint8),
]
def __repr__(self):
return "<logheader head1=0x{self.head1:x} head2=0x{self.head2:x} msgid=0x{self.msgid:x} ({self.msgid})>".format(self=self)
class BinaryFormat(ctypes.LittleEndianStructure):
NAME = 'FMT'
MSG = 128
SIZE = 0
FIELD_FORMAT = {
'b': ctypes.c_int8,
'B': ctypes.c_uint8,
'h': ctypes.c_int16,
'H': ctypes.c_uint16,
'i': ctypes.c_int32,
'I': ctypes.c_uint32,
'f': ctypes.c_float,
'd': ctypes.c_double,
'n': ctypes.c_char * 4,
'N': ctypes.c_char * 16,
'Z': ctypes.c_char * 64,
'c': ctypes.c_int16,# * 100,
'C': ctypes.c_uint16,# * 100,
'e': ctypes.c_int32,# * 100,
'E': ctypes.c_uint32,# * 100,
'L': ctypes.c_int32,
'M': ctypes.c_uint8,
'q': ctypes.c_int64,
'Q': ctypes.c_uint64,
}
FIELD_SCALE = {
'c': 100,
'C': 100,
'e': 100,
'E': 100,
}
_packed_ = True
_fields_ = [ \
('head', logheader),
('type', ctypes.c_uint8),
('length', ctypes.c_uint8),
('name', ctypes.c_char * 4),
('types', ctypes.c_char * 16),
('labels', ctypes.c_char * 64),
]
def __repr__(self):
return "<{cls} {data}>".format(cls=self.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(self,k)) for (k,_) in self._fields_[1:]]))
def to_class(self):
members = dict(
NAME = self.name,
MSG = self.type,
SIZE = self.length,
labels = self.labels.split(",") if self.labels else [],
_pack_ = True)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels.split(",")
if self.labels and (len(fieldtypes) != len(fieldlabels)):
print("Broken FMT message for {} .. ignoring".format(self.name), file=sys.stderr)
return None
fields = [('head',logheader)]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
scale = BinaryFormat.FIELD_SCALE.get(format, None)
p = property(lambda x:getattr(x, attributename))
if scale is not None:
p = property(lambda x:getattr(x, attributename) / scale)
members[propertyname] = p
try:
fields.append((attributename, BinaryFormat.FIELD_FORMAT[format]))
except KeyError:
print('ERROR: Failed to add FMT type: {}, with format: {}'.format(attributename, format))
raise
createproperty(label, _type)
members['_fields_'] = fields
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,k)) for k in x.labels]))
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(ctypes.LittleEndianStructure,),
members
)
if ctypes.sizeof(cls) != cls.SIZE:
print("size mismatch for {} expected {} got {}".format(cls, ctypes.sizeof(cls), cls.SIZE), file=sys.stderr)
# for i in cls.labels:
# print("{} = {}".format(i,getattr(cls,'_'+i)))
return None
return cls
BinaryFormat.SIZE = ctypes.sizeof(BinaryFormat)
class Channel(object):
'''storage for a single stream of data, i.e. all GPS.RelAlt values'''
# TODO: rethink data storage, but do more thorough regression testing before refactoring it
# TODO: store data as a scipy spline curve so we can more easily interpolate and sample the slope?
def __init__(self):
self.dictData = {} # dict of linenum->value # store dupe data in dict and list for now, until we decide which is the better way to go
self.listData = [] # list of (linenum,value) # store dupe data in dict and list for now, until we decide which is the better way to go
def getSegment(self, startLine, endLine):
'''returns a segment of this data (from startLine to endLine, inclusive) as a new Channel instance'''
segment = Channel()
segment.dictData = {k:v for k,v in self.dictData.iteritems() if k >= startLine and k <= endLine}
return segment
def min(self):
return min(self.dictData.values())
def max(self):
return max(self.dictData.values())
def avg(self):
return numpy.mean(self.dictData.values())
def getNearestValueFwd(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
while index<len(self.listData):
line = self.listData[index][0]
#print "Looking forwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line >= lineNumber:
return (self.listData[index][1],line)
index += 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValueBack(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999)) - 1
while index>=0:
line = self.listData[index][0]
#print "Looking backwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line <= lineNumber:
return (self.listData[index][1],line)
index -= 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValue(self, lineNumber, lookForwards=True):
'''find the nearest data value to the given lineNumber, defaults to first looking forwards. Returns (value,lineNumber)'''
if lookForwards:
try:
return self.getNearestValueFwd(lineNumber)
except:
return self.getNearestValueBack(lineNumber)
else:
try:
return self.getNearestValueBack(lineNumber)
except:
return self.getNearestValueFwd(lineNumber)
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getInterpolatedValue(self, lineNumber):
(prevValue,prevValueLine) = self.getNearestValue(lineNumber, lookForwards=False)
(nextValue,nextValueLine) = self.getNearestValue(lineNumber, lookForwards=True)
if prevValueLine == nextValueLine:
return prevValue
weight = (lineNumber-prevValueLine) / float(nextValueLine-prevValueLine)
return ((weight*prevValue) + ((1-weight)*nextValue))
def getIndexOf(self, lineNumber):
'''returns the index within this channel's listData of the given lineNumber, or raises an Exception if not found'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
#print "INDEX of line %d: %d" % (lineNumber,index)
#print "self.listData[index][0]: %d" % self.listData[index][0]
if (self.listData[index][0] == lineNumber):
return index
else:
raise Exception("Error finding index for line %d" % lineNumber)
class LogIterator:
'''Smart iterator that can move through a log by line number and maintain an index into the nearest values of all data channels'''
# TODO: LogIterator currently indexes the next available value rather than the nearest value, we should make it configurable between next/nearest
class LogIteratorSubValue:
'''syntactic sugar to allow access by LogIterator[lineLabel][dataLabel]'''
logdata = None
iterators = None
lineLabel = None
def __init__(self, logdata, iterators, lineLabel):
self.logdata = logdata
self.lineLabel = lineLabel
self.iterators = iterators
def __getitem__(self, dataLabel):
index = self.iterators[self.lineLabel][0]
return self.logdata.channels[self.lineLabel][dataLabel].listData[index][1]
iterators = {} # lineLabel -> (listIndex,lineNumber)
logdata = None
currentLine = None
def __init__(self, logdata, lineNumber=0):
self.logdata = logdata
self.currentLine = lineNumber
for lineLabel in self.logdata.formats:
if lineLabel in self.logdata.channels:
self.iterators[lineLabel] = ()
self.jump(lineNumber)
def __iter__(self):
return self
def __getitem__(self, lineLabel):
return LogIterator.LogIteratorSubValue(self.logdata, self.iterators, lineLabel)
def next(self):
'''increment iterator to next log line'''
self.currentLine += 1
if self.currentLine > self.logdata.lineCount:
return self
for lineLabel in self.iterators.keys():
            # check if the currentLine has gone past the line we're pointing to for this type of data
dataLabel = self.logdata.formats[lineLabel].labels[0]
(index, lineNumber) = self.iterators[lineLabel]
# if so, and it is not the last entry in the log, then increment the indices for all dataLabels under that lineLabel
if (self.currentLine > lineNumber) and (index < len(self.logdata.channels[lineLabel][dataLabel].listData)-1):
index += 1
lineNumber = self.logdata.channels[lineLabel][dataLabel].listData[index][0]
self.iterators[lineLabel] = (index,lineNumber)
return self
def jump(self, lineNumber):
'''jump iterator to specified log line'''
self.currentLine = lineNumber
for lineLabel in self.iterators.keys():
dataLabel = self.logdata.formats[lineLabel].labels[0]
(value,lineNumber) = self.logdata.channels[lineLabel][dataLabel].getNearestValue(self.currentLine)
self.iterators[lineLabel] = (self.logdata.channels[lineLabel][dataLabel].getIndexOf(lineNumber), lineNumber)
class DataflashLogHelper:
'''helper functions for dealing with log data, put here to keep DataflashLog class as a simple parser and data store'''
@staticmethod
def getTimeAtLine(logdata, lineNumber):
'''returns the nearest GPS timestamp in milliseconds after the given line number'''
if not "GPS" in logdata.channels:
raise Exception("no GPS log data found")
# older logs use 'TIme', newer logs use 'TimeMS'
timeLabel = "TimeMS"
if "Time" in logdata.channels["GPS"]:
timeLabel = "Time"
while lineNumber <= logdata.lineCount:
if lineNumber in logdata.channels["GPS"][timeLabel].dictData:
return logdata.channels["GPS"][timeLabel].dictData[lineNumber]
lineNumber = lineNumber + 1
sys.stderr.write("didn't find GPS data for " + str(lineNumber) + " - using maxtime\n")
return logdata.channels["GPS"][timeLabel].max()
@staticmethod
def findLoiterChunks(logdata, minLengthSeconds=0, noRCInputs=True):
'''returns a list of (to,from) pairs defining sections of the log which are in loiter mode. Ordered from longest to shortest in time. If noRCInputs == True it only returns chunks with no control inputs'''
# TODO: implement noRCInputs handling when identifying stable loiter chunks, for now we're ignoring it
def chunkSizeCompare(chunk1, chunk2):
chunk1Len = chunk1[1]-chunk1[0]
chunk2Len = chunk2[1]-chunk2[0]
if chunk1Len == chunk2Len:
return 0
elif chunk1Len > chunk2Len:
return -1
else:
return 1
od = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
chunks = []
for i in range(len(od.keys())):
if od.values()[i][0] == "LOITER":
startLine = od.keys()[i]
endLine = None
if i == len(od.keys())-1:
endLine = logdata.lineCount
else:
endLine = od.keys()[i+1]-1
chunkTimeSeconds = (DataflashLogHelper.getTimeAtLine(logdata,endLine)-DataflashLogHelper.getTimeAtLine(logdata,startLine)+1) / 1000.0
if chunkTimeSeconds > minLengthSeconds:
chunks.append((startLine,endLine))
#print "LOITER chunk: %d to %d, %d lines" % (startLine,endLine,endLine-startLine+1)
#print " (time %d to %d, %d seconds)" % (DataflashLogHelper.getTimeAtLine(logdata,startLine), DataflashLogHelper.getTimeAtLine(logdata,endLine), chunkTimeSeconds)
chunks.sort(chunkSizeCompare)
return chunks
@staticmethod
def isLogEmpty(logdata):
'''returns an human readable error string if the log is essentially empty, otherwise returns None'''
# naive check for now, see if the throttle output was ever above 20%
throttleThreshold = 20
if logdata.vehicleType == VehicleType.Copter:
throttleThreshold = 200 # copter uses 0-1000, plane+rover use 0-100
if "CTUN" in logdata.channels:
try:
maxThrottle = logdata.channels["CTUN"]["ThrOut"].max()
except KeyError as e:
# ThrOut was shorted to ThO at some stage...
maxThrottle = logdata.channels["CTUN"]["ThO"].max()
# at roughly the same time ThO became a range from 0 to 1
throttleThreshold = 0.2
if maxThrottle < throttleThreshold:
return "Throttle never above 20%"
return None
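# Illustrative sketch (not part of the upstream module): typical use of the
# helper, assuming `logdata` is an already-parsed DataflashLog instance (see
# the class below); the 10 second threshold is arbitrary.
# >>> empty_reason = DataflashLogHelper.isLogEmpty(logdata)
# >>> if empty_reason is None:
# ...     for (startLine, endLine) in DataflashLogHelper.findLoiterChunks(logdata, minLengthSeconds=10):
# ...         print("stable LOITER section: lines %d-%d" % (startLine, endLine))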
class DataflashLog(object):
'''APM Dataflash log file reader and container class. Keep this simple, add more advanced or specific functions to DataflashLogHelper class'''
knownHardwareTypes = ["APM", "PX4", "MPNG"]
intTypes = "bBhHiIM"
floatTypes = "fcCeEL"
charTypes = "nNZ"
def __init__(self, logfile=None, format="auto", ignoreBadlines=False):
self.filename = None
self.vehicleType = None # from VehicleType enumeration; value derived from header
        self.vehicleTypeString = None # set at same time as the enum value
self.firmwareVersion = ""
self.firmwareHash = ""
self.freeRAM = 0
self.hardwareType = "" # APM 1, APM 2, PX4, MPNG, etc What is VRBrain? BeagleBone, etc? Needs more testing
self.formats = {} # name -> Format
self.parameters = {} # token -> value
self.messages = {} # lineNum -> message
self.modeChanges = {} # lineNum -> (mode,value)
self.channels = {} # lineLabel -> {dataLabel:Channel}
self.filesizeKB = 0
self.durationSecs = 0
self.lineCount = 0
self.skippedLines = 0
self.backpatch_these_modechanges = []
if logfile:
self.read(logfile, format, ignoreBadlines)
def getCopterType(self):
'''returns quad/hex/octo/tradheli if this is a copter log'''
if self.vehicleType != VehicleType.Copter:
return None
motLabels = []
if "MOT" in self.formats: # not listed in PX4 log header for some reason?
motLabels = self.formats["MOT"].labels
if "GGain" in motLabels:
return "tradheli"
elif len(motLabels) == 4:
return "quad"
elif len(motLabels) == 6:
return "hex"
elif len(motLabels) == 8:
return "octo"
else:
return ""
def read(self, logfile, format="auto", ignoreBadlines=False):
'''returns on successful log read (including bad lines if ignoreBadlines==True), will throw an Exception otherwise'''
# TODO: dataflash log parsing code is pretty hacky, should re-write more methodically
self.filename = logfile
if self.filename == '<stdin>':
f = sys.stdin
else:
f = open(self.filename, 'r')
if format == 'bin':
head = '\xa3\x95\x80\x80'
elif format == 'log':
head = ""
elif format == 'auto':
if self.filename == '<stdin>':
# assuming TXT format
# raise ValueError("Invalid log format for stdin: {}".format(format))
head = ""
else:
head = f.read(4)
f.seek(0)
else:
raise ValueError("Unknown log format for {}: {}".format(self.filename, format))
if head == '\xa3\x95\x80\x80':
numBytes, lineNumber = self.read_binary(f, ignoreBadlines)
pass
else:
numBytes, lineNumber = self.read_text(f, ignoreBadlines)
# gather some general stats about the log
self.lineCount = lineNumber
self.filesizeKB = numBytes / 1024.0
        # TODO: switch duration calculation to use TimeMS values rather than GPS timestamp
if "GPS" in self.channels:
# the GPS time label changed at some point, need to handle both
timeLabel = None
for i in 'TimeMS','TimeUS','Time':
if i in self.channels["GPS"]:
timeLabel = i
break
firstTimeGPS = int(self.channels["GPS"][timeLabel].listData[0][1])
lastTimeGPS = int(self.channels["GPS"][timeLabel].listData[-1][1])
if timeLabel == 'TimeUS':
firstTimeGPS /= 1000
lastTimeGPS /= 1000
self.durationSecs = (lastTimeGPS-firstTimeGPS) / 1000
# TODO: calculate logging rate based on timestamps
# ...
msg_vehicle_to_vehicle_map = {
"ArduCopter": VehicleType.Copter,
"APM:Copter": VehicleType.Copter,
"ArduPlane": VehicleType.Plane,
"ArduRover": VehicleType.Rover
}
# takes the vehicle type supplied via "MSG" and sets vehicleType from
# the VehicleType enumeration
def set_vehicleType_from_MSG_vehicle(self, MSG_vehicle):
ret = self.msg_vehicle_to_vehicle_map.get(MSG_vehicle, None)
if ret is None:
raise ValueError("Unknown vehicle type (%s)" % (MSG_vehicle))
self.vehicleType = ret
self.vehicleTypeString = VehicleTypeString[ret]
def handleModeChange(self, lineNumber, e):
if self.vehicleType == VehicleType.Copter:
try:
modes = {0:'STABILIZE',
1:'ACRO',
2:'ALT_HOLD',
3:'AUTO',
4:'GUIDED',
5:'LOITER',
6:'RTL',
7:'CIRCLE',
9:'LAND',
10:'OF_LOITER',
11:'DRIFT',
13:'SPORT',
14:'FLIP',
15:'AUTOTUNE',
16:'HYBRID',}
if hasattr(e, 'ThrCrs'):
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ThrCrs)
else:
# assume it has ModeNum:
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ModeNum)
except:
if hasattr(e, 'ThrCrs'):
self.modeChanges[lineNumber] = (e.Mode, e.ThrCrs)
else:
# assume it has ModeNum:
self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
elif self.vehicleType in [VehicleType.Plane, VehicleType.Copter, VehicleType.Rover]:
self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
else:
# if you've gotten to here the chances are we don't
# know what vehicle you're flying...
raise Exception("Unknown log type for MODE line vehicletype=({}) line=({})".format(self.vehicleTypeString, repr(e)))
def backPatchModeChanges(self):
for (lineNumber, e) in self.backpatch_these_modechanges:
self.handleModeChange(lineNumber, e)
def process(self, lineNumber, e):
if e.NAME == 'FMT':
cls = e.to_class()
if cls is not None: # FMT messages can be broken ...
if hasattr(e, 'type') and e.type not in self._formats: # binary log specific
self._formats[e.type] = cls
if cls.NAME not in self.formats:
self.formats[cls.NAME] = cls
elif e.NAME == "PARM":
self.parameters[e.Name] = e.Value
elif e.NAME == "MSG":
if not self.vehicleType:
tokens = e.Message.split(' ')
                self.set_vehicleType_from_MSG_vehicle(tokens[0])
self.backPatchModeChanges()
self.firmwareVersion = tokens[1]
if len(tokens) == 3:
self.firmwareHash = tokens[2][1:-1]
else:
self.messages[lineNumber] = e.Message
elif e.NAME == "MODE":
if self.vehicleType is None:
self.backpatch_these_modechanges.append( (lineNumber, e) )
else:
self.handleModeChange(lineNumber, e)
# anything else must be the log data
else:
groupName = e.NAME
# first time seeing this type of log line, create the channel storage
if not groupName in self.channels:
self.channels[groupName] = {}
for label in e.labels:
self.channels[groupName][label] = Channel()
# store each token in its relevant channel
for label in e.labels:
value = getattr(e, label)
channel = self.channels[groupName][label]
channel.dictData[lineNumber] = value
channel.listData.append((lineNumber, value))
def read_text(self, f, ignoreBadlines):
self.formats = {'FMT':Format}
lineNumber = 0
numBytes = 0
knownHardwareTypes = ["APM", "PX4", "MPNG"]
for line in f:
lineNumber = lineNumber + 1
numBytes += len(line) + 1
try:
#print "Reading line: %d" % lineNumber
line = line.strip('\n\r')
tokens = line.split(', ')
# first handle the log header lines
if line == " Ready to drive." or line == " Ready to FLY.":
continue
if line == "----------------------------------------": # present in pre-3.0 logs
raise Exception("Log file seems to be in the older format (prior to self-describing logs), which isn't supported")
if len(tokens) == 1:
tokens2 = line.split(' ')
if line == "":
pass
elif len(tokens2) == 1 and tokens2[0].isdigit(): # log index
pass
elif len(tokens2) == 3 and tokens2[0] == "Free" and tokens2[1] == "RAM:":
self.freeRAM = int(tokens2[2])
elif tokens2[0] in knownHardwareTypes:
self.hardwareType = line # not sure if we can parse this more usefully, for now only need to report it back verbatim
elif (len(tokens2) == 2 or len(tokens2) == 3) and tokens2[1][0].lower() == "v": # e.g. ArduCopter V3.1 (5c6503e2)
self.set_vehicleType_from_MSG_vehicle(tokens2[0])
self.firmwareVersion = tokens2[1]
if len(tokens2) == 3:
self.firmwareHash = tokens2[2][1:-1]
else:
errorMsg = "Error parsing line %d of log file: %s" % (lineNumber, self.filename)
if ignoreBadlines:
print(errorMsg + " (skipping line)", file=sys.stderr)
self.skippedLines += 1
else:
raise Exception("")
else:
if not tokens[0] in self.formats:
raise ValueError("Unknown Format {}".format(tokens[0]))
e = self.formats[tokens[0]](*tokens[1:])
self.process(lineNumber, e)
except Exception as e:
print("BAD LINE: " + line, file=sys.stderr)
if not ignoreBadlines:
raise Exception("Error parsing line %d of log file %s - %s" % (lineNumber,self.filename,e.args[0]))
return (numBytes,lineNumber)
def read_binary(self, f, ignoreBadlines):
lineNumber = 0
numBytes = 0
for e in self._read_binary(f, ignoreBadlines):
lineNumber += 1
if e is None:
continue
numBytes += e.SIZE
# print(e)
self.process(lineNumber, e)
return (numBytes,lineNumber)
def _read_binary(self, f, ignoreBadlines):
self._formats = {128:BinaryFormat}
data = bytearray(f.read())
offset = 0
while len(data) > offset + ctypes.sizeof(logheader):
h = logheader.from_buffer(data, offset)
if not (h.head1 == 0xa3 and h.head2 == 0x95):
if ignoreBadlines == False:
raise ValueError(h)
else:
if h.head1 == 0xff and h.head2 == 0xff and h.msgid == 0xff:
print("Assuming EOF due to dataflash block tail filled with \\xff... (offset={off})".format(off=offset), file=sys.stderr)
break
offset += 1
continue
if h.msgid in self._formats:
typ = self._formats[h.msgid]
if len(data) <= offset + typ.SIZE:
break
try:
e = typ.from_buffer(data, offset)
except:
print("data:{} offset:{} size:{} sizeof:{} sum:{}".format(len(data),offset,typ.SIZE,ctypes.sizeof(typ),offset+typ.SIZE))
raise
offset += typ.SIZE
else:
raise ValueError(str(h) + "unknown type")
yield e
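# Minimal usage sketch (not part of the upstream module): parse a log named on
# the command line and print a short summary. It is kept behind a __main__
# guard so importing this module (as the LogAnalyzer does) is unaffected; the
# fields printed are just a sample of what DataflashLog exposes.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        logdata = DataflashLog(sys.argv[1], format="auto", ignoreBadlines=True)
        print("%s %s: %d lines, %.1f KB, %d seconds of data" % (
            logdata.vehicleTypeString, logdata.firmwareVersion,
            logdata.lineCount, logdata.filesizeKB, logdata.durationSecs))
        print("%d parameters, %d mode changes, %d lines skipped" % (
            len(logdata.parameters), len(logdata.modeChanges), logdata.skippedLines))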
| gpl-3.0 |
DazWorrall/zulip | zilencer/management/commands/profile_request.py | 117 | 1632 | from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import get_user_profile_by_email, UserMessage
from zerver.views.old_messages import get_old_messages_backend
import cProfile
import logging
from zerver.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
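# Usage sketch (not part of the upstream command): after running, e.g.,
# `./manage.py profile_request --email [email protected]` (address is hypothetical),
# the dump written to /tmp/profile.data can be inspected with the stdlib
# pstats module:
# >>> import pstats
# >>> stats = pstats.Stats("/tmp/profile.data")
# >>> stats.sort_stats("cumulative").print_stats(25)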
| apache-2.0 |
hellotomfan/v8-coroutine | build/gyp/test/library/gyptest-shared-obj-install-path.py | 289 | 1180 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that .so files that are order only dependencies are specified by
their install location rather than by their alias.
"""
# Python 2.5 needs this for the with statement.
from __future__ import with_statement
import os
import TestGyp
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('shared_dependency.gyp',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
if test.format == 'android':
makefile_path = 'relocate/src/GypAndroid.mk'
else:
makefile_path = 'relocate/src/Makefile'
with open(makefile_path) as makefile:
make_contents = makefile.read()
# If we remove the code to generate lib1, Make should still be able
# to build lib2 since lib1.so already exists.
make_contents = make_contents.replace('include lib1.target.mk', '')
with open(makefile_path, 'w') as makefile:
makefile.write(make_contents)
test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
test.pass_test()
| gpl-2.0 |
jereze/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
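# Illustrative example (sketch, not part of the library): zero and non-finite
# entries are replaced by 1.0 so that the later division by the scale is a
# no-op for constant features. Exact repr formatting depends on the numpy
# version in use.
# >>> _handle_zeros_in_scale(np.array([4., 0., 2.]))
# array([ 4.,  1.,  2.])
# >>> _handle_zeros_in_scale(0.0)
# 1.0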
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
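# Illustrative example (sketch, not part of the library): column-wise
# standardization with the function above; after scaling, each column has
# zero mean and unit standard deviation (up to floating point error).
# >>> X = np.array([[1., -1.], [2., 0.], [3., 1.]])
# >>> Xs = scale(X)
# >>> Xs.mean(axis=0), Xs.std(axis=0)   # approximately (array([0., 0.]), array([1., 1.]))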
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
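# Worked example (sketch, not part of the library) of the transformation in
# the docstring above: with the default feature_range=(0, 1), each column is
# mapped so that its minimum becomes 0 and its maximum becomes 1.
# >>> X = np.array([[1., 10.], [2., 20.], [4., 30.]])
# >>> MinMaxScaler().fit_transform(X)   # columns become [0, 1/3, 1] and [0, 0.5, 1]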
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
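# Illustrative example (sketch, not part of the library): fit on training
# data, then reuse the stored mean_ / std_ so that new data is shifted and
# scaled consistently; X_train and X_test are placeholders for the caller's
# own 2d float arrays.
# >>> scaler = StandardScaler().fit(X_train)
# >>> X_train_scaled = scaler.transform(X_train)   # zero mean, unit variance per column
# >>> X_test_scaled = scaler.transform(X_test)     # same shift/scale applied to unseen data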
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
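# Illustrative example (sketch, not part of the library): dividing by the
# maximum absolute value keeps zeros at zero, which is why sparsity is
# preserved.
# >>> X = np.array([[1., -2.], [2., 0.], [-4., 1.]])
# >>> MaxAbsScaler().fit_transform(X)   # columns divided by 4 and 2: [[0.25, -1.], [0.5, 0.], [-1., 0.5]]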
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
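# Illustrative example (sketch, not part of the library): the median/IQR based
# statistics are barely affected by a single extreme outlier, unlike the
# mean/std used by StandardScaler.
# >>> X = np.array([[1.], [2.], [3.], [4.], [1000.]])
# >>> rs = RobustScaler().fit(X)
# >>> rs.center_, rs.scale_   # median 3, IQR = 4 - 2 = 2, unaffected by the outlier
# (array([ 3.]), array([ 2.]))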
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
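# --- Illustrative sketch (not part of the upstream scikit-learn source): how
# robust_scale behaves in the presence of an outlier. Assumes only numpy and
# the functions defined above; the exact output values are indicative. ---
def _example_robust_scale():
    import numpy as np
    # The last row is an outlier; median/IQR based scaling is barely affected
    # by it, whereas mean/std based scaling would be dominated by it.
    X = np.array([[1.0, 2.0],
                  [2.0, 4.0],
                  [3.0, 6.0],
                  [100.0, 200.0]])
    return robust_scale(X, axis=0, with_centering=True, with_scaling=True)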
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
    powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
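# --- Illustrative sketch (not part of the upstream source): reading the
# ``powers_`` attribute documented above. Assumes numpy. ---
def _example_polynomial_powers():
    import numpy as np
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    # Each row of powers_ gives the exponent of every input feature in one
    # output column; for example the row [1, 1] is the interaction term x0*x1.
    return poly.powers_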
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
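# --- Illustrative sketch (not part of the upstream source): the three
# supported norms applied to a tiny dense array. Assumes numpy. ---
def _example_normalize():
    import numpy as np
    X = np.array([[3.0, 4.0],
                  [1.0, 0.0]])
    l2 = normalize(X, norm='l2')   # each row has unit Euclidean length
    l1 = normalize(X, norm='l1')   # absolute values of each row sum to 1
    mx = normalize(X, norm='max')  # each row is divided by its maximum
    return l2, l1, mx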
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
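# --- Illustrative sketch (not part of the upstream source): after l2
# normalization the dot product of two rows is their cosine similarity, as the
# class docstring notes for TF-IDF vectors. Assumes numpy. ---
def _example_normalizer_cosine():
    import numpy as np
    X = np.array([[1.0, 2.0, 0.0],
                  [2.0, 4.0, 1.0]])
    Xn = Normalizer(norm='l2').fit_transform(X)
    return np.dot(Xn[0], Xn[1])  # cosine similarity of the two samples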
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
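# --- Illustrative sketch (not part of the upstream source): thresholding
# count data at zero keeps presence/absence only. Assumes numpy. ---
def _example_binarize():
    import numpy as np
    X = np.array([[0.0, 2.0, 0.5],
                  [3.0, 0.0, 0.0]])
    return binarize(X, threshold=0.0)  # -> [[0., 1., 1.], [1., 0., 0.]]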
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
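# --- Illustrative sketch (not part of the upstream source): for a linear
# kernel K = X.dot(X.T), centering the kernel is equivalent to computing the
# kernel of the mean-centered data, as the class docstring states.
# Assumes numpy. ---
def _example_kernel_centerer():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    Xc = X - X.mean(axis=0)
    return np.allclose(K_centered, np.dot(Xc, Xc.T))  # expected: True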
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
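# --- Illustrative sketch (not part of the upstream source): applying a
# transform to a subset of columns while passing the rest through. Uses
# ``binarize`` defined above purely as an example callable; assumes numpy. ---
def _example_transform_selected():
    import numpy as np
    X = np.array([[0.0, 5.0],
                  [2.0, -1.0]])
    # Binarize only column 0; column 1 is stacked, untouched, to the right.
    return _transform_selected(X, binarize, selected=[0])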
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/pygments/console.py | 27 | 1809 | # -*- coding: utf-8 -*-
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%i;01m" % x
x += 1
del d, l, x
codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
codes["fuscia"] = codes["fuchsia"]
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
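# --- Illustrative sketch (not part of upstream Pygments): typical use of the
# helpers above; output only renders correctly on ANSI-capable terminals. ---
def _example_console_usage():
    print(colorize("red", "an error message"))
    print(ansiformat("*_green_*", "bold underlined green text"))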
| gpl-3.0 |
aruppen/xwot.py | xwot/device/lightbulb.py | 1 | 4704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# xwot.py - Python tools for the extended Web of Things
# Copyright (C) 2015 Alexander Rüedlinger
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
__author__ = 'Alexander Rüedlinger'
from xwot.model import Context as XWOTContext
from xwot.model import Sensor as XWOTSensor
from xwot.model import Device as XWOTDevice
from xwot.model import Model
from xwot.model import BaseModel
class LightBulb(XWOTDevice, BaseModel):
__mutable_props__ = ['name', 'streetAddress', 'roomAddress', 'postalCode', 'addressLocality']
__expose__ = __mutable_props__ + ['description', 'switch', 'sensor']
def __init__(self, name, street_address, postal_code, address_locality, room_address):
super(LightBulb, self).__init__()
self._dic = {
'name': name,
'streetAddress': street_address,
'postalCode': postal_code,
'addressLocality': address_locality,
'roomAddress': room_address
}
self.add_type('xwot-ext:LightBulb')
self.add_link('switch')
self.add_link('sensor')
@property
def resource_path(self):
return '/lightbulb'
@property
def name(self):
return self._dic['name']
@property
def description(self):
return "Hi there my name is %s. I'm a light bulb and currently present in room %s at the location: %s, %s, %s" % \
(self.name, self.roomAddress, self.streetAddress, self.addressLocality, self.postalCode)
@property
def switch(self):
return '/lightbulb/switch'
@property
def sensor(self):
return '/lightbulb/sensor'
@property
def streetAddress(self):
return self._dic['streetAddress']
@property
def postalCode(self):
return self._dic['postalCode']
@property
def addressLocality(self):
return self._dic['addressLocality']
@property
def roomAddress(self):
return self._dic['roomAddress']
from xwot.i2c.adapter import LightBulbAdapter
class Switch(XWOTContext, Model):
__mutable_props__ = ['name', 'state']
__expose__ = __mutable_props__ + ['description', 'lightbulb']
def __init__(self, name, adapter=LightBulbAdapter()):
super(Switch, self).__init__()
self._dic = {
'name': name
}
self._adapter = adapter
self.add_type('xwot-ext:Switch')
self.add_link('lightbulb')
@property
def resource_path(self):
return '/lightbulb/switch'
@property
def description(self):
return "A light switch to turn off or on."
@property
def lightbulb(self):
return '/lightbulb'
@property
def state(self):
return self._adapter.state
@property
def name(self):
return self._dic['name']
def handle_update(self, dic):
if dic.get('state') == 'off':
self._adapter.switch_off()
if dic.get('state') == 'on':
self._adapter.switch_on()
self._dic['name'] = str(dic.get('name', self._dic['name']))
return 200
class Sensor(XWOTSensor, Model):
__expose__ = ['name', 'unit', 'measures', 'description', 'measurement', 'symbol', 'lightbulb']
def __init__(self, adapter=LightBulbAdapter()):
super(Sensor, self).__init__()
self._adapter = adapter
self.add_type('xwot-ext:IlluminanceSensor')
self.add_link('lightbulb')
@property
def resource_path(self):
return '/lightbulb/sensor'
@property
def name(self):
return 'Illuminance sensor'
@property
def lightbulb(self):
return '/lightbulb'
@property
def unit(self):
return 'Lux'
@property
def description(self):
return 'A sensor that measures the illuminance of this light bulb.'
@property
def measures(self):
return 'Illuminance'
@property
def measurement(self):
return self._adapter.illuminance
@property
def symbol(self):
return 'lx'
def handle_update(self, dic):
pass
| gpl-3.0 |
pquerna/cloud-init-debian-pkg-dead | cloudinit/config/cc_disk_setup.py | 6 | 25274 | # vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Ben Howard <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
import logging
import os
import shlex
frequency = PER_INSTANCE
# Define the commands to use
UDEVADM_CMD = util.which('udevadm')
SFDISK_CMD = util.which("sfdisk")
LSBLK_CMD = util.which("lsblk")
BLKID_CMD = util.which("blkid")
BLKDEV_CMD = util.which("blockdev")
WIPEFS_CMD = util.which("wipefs")
LOG = logging.getLogger(__name__)
def handle(_name, cfg, cloud, log, _args):
"""
See doc/examples/cloud-config_disk-setup.txt for documentation on the
format.
"""
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
log.warn("Invalid disk definition for %s" % disk)
continue
try:
log.debug("Creating new partition table/disk")
util.log_time(logfunc=LOG.debug,
msg="Creating partition on %s" % disk,
func=mkpart, args=(disk, definition))
except Exception as e:
util.logexc(LOG, "Failed partitioning operation\n%s" % e)
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
log.debug("setting up filesystems: %s", str(fs_setup))
update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warn("Invalid file system definition: %s" % definition)
continue
try:
log.debug("Creating new filesystem.")
device = definition.get('device')
util.log_time(logfunc=LOG.debug,
msg="Creating fs for %s" % device,
func=mkfs, args=(definition,))
except Exception as e:
util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
def update_disk_setup_devices(disk_setup, tformer):
# update 'disk_setup' dictionary anywhere were a device may occur
# update it with the response from 'tformer'
for origname in disk_setup.keys():
transformed = tformer(origname)
if transformed is None or transformed == origname:
continue
if transformed in disk_setup:
LOG.info("Replacing %s in disk_setup for translation of %s",
origname, transformed)
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
disk_setup[transformed]['_origname'] = origname
del disk_setup[origname]
LOG.debug("updated disk_setup device entry '%s' to '%s'",
origname, transformed)
def update_fs_setup_devices(disk_setup, tformer):
# update 'fs_setup' dictionary anywhere were a device may occur
# update it with the response from 'tformer'
for definition in disk_setup:
if not isinstance(definition, dict):
LOG.warn("entry in disk_setup not a dict: %s", definition)
continue
origname = definition.get('device')
if origname is None:
continue
(dev, part) = util.expand_dotted_devname(origname)
tformed = tformer(dev)
if tformed is not None:
dev = tformed
LOG.debug("%s is mapped to disk=%s part=%s",
origname, tformed, part)
definition['_origname'] = origname
definition['device'] = tformed
if part and 'partition' in definition:
definition['_partition'] = definition['partition']
definition['partition'] = part
def value_splitter(values, start=None):
"""
Returns the key/value pairs of output sent as string
like: FOO='BAR' HOME='127.0.0.1'
"""
_values = shlex.split(values)
if start:
_values = _values[start:]
for key, value in [x.split('=') for x in _values]:
yield key, value
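# --- Illustrative sketch (not part of upstream cloud-init): what
# value_splitter yields for typical `lsblk --pairs` output. ---
def _example_value_splitter():
    line = 'NAME="sda1" TYPE="part" FSTYPE="ext4" LABEL=""'
    # -> [('NAME', 'sda1'), ('TYPE', 'part'), ('FSTYPE', 'ext4'), ('LABEL', '')]
    return list(value_splitter(line))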
def enumerate_disk(device, nodeps=False):
"""
Enumerate the elements of a child device.
Parameters:
device: the kernel device name
nodeps <BOOL>: don't enumerate children devices
Return a dict describing the disk:
type: the entry type, i.e disk or part
fstype: the filesystem type, if it exists
label: file system label, if it exists
name: the device name, i.e. sda
"""
lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL',
device]
if nodeps:
lsblk_cmd.append('--nodeps')
info = None
try:
info, _err = util.subp(lsblk_cmd)
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
for part in parts:
d = {'name': None,
'type': None,
'fstype': None,
'label': None,
}
for key, value in value_splitter(part):
d[key.lower()] = value
yield d
def device_type(device):
"""
Return the device type of the device by calling lsblk.
"""
for d in enumerate_disk(device, nodeps=True):
if "type" in d:
return d["type"].lower()
return None
def is_device_valid(name, partition=False):
"""
Check if the device is a valid device.
"""
d_type = ""
try:
d_type = device_type(name)
except:
LOG.warn("Query against device %s failed" % name)
return False
if partition and d_type == 'part':
return True
elif not partition and d_type == 'disk':
return True
return False
def check_fs(device):
"""
Check if the device has a filesystem on it
Output of blkid is generally something like:
/dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
    Return values are label, type, uuid
"""
out, label, fs_type, uuid = None, None, None, None
blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
try:
out, _err = util.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
if out:
if len(out.splitlines()) == 1:
for key, value in value_splitter(out, start=1):
if key.lower() == 'label':
label = value
elif key.lower() == 'type':
fs_type = value
elif key.lower() == 'uuid':
uuid = value
return label, fs_type, uuid
def is_filesystem(device):
"""
Returns true if the device has a file system.
"""
_, fs_type, _ = check_fs(device)
return fs_type
def find_device_node(device, fs_type=None, label=None, valid_targets=None,
label_match=True, replace_fs=None):
"""
    Find a device that either matches the spec, or the first available one.
    The return value is (<device>, <bool>) where the device is the
device to use and the bool is whether the device matches the
fs_type and label.
Note: This works with GPT partition tables!
"""
# label of None is same as no label
if label is None:
label = ""
if not valid_targets:
valid_targets = ['disk', 'part']
raw_device_used = False
for d in enumerate_disk(device):
if d['fstype'] == replace_fs and label_match is False:
# We found a device where we want to replace the FS
return ('/dev/%s' % d['name'], False)
if (d['fstype'] == fs_type and
((label_match and d['label'] == label) or not label_match)):
# If we find a matching device, we return that
return ('/dev/%s' % d['name'], True)
if d['type'] in valid_targets:
if d['type'] != 'disk' or d['fstype']:
raw_device_used = True
if d['type'] == 'disk':
                # Skip the raw disk, it's the default
pass
elif not d['fstype']:
return ('/dev/%s' % d['name'], False)
if not raw_device_used:
return (device, False)
LOG.warn("Failed to find device during available device search.")
return (None, False)
def is_disk_used(device):
"""
    Check if the device is currently used. Returns true if the device
    has either a file system or a partition entry.
"""
    # If the child count is higher than 1, then there are child nodes
    # such as partition or device mapper nodes
    use_count = [x for x in enumerate_disk(device)]
    if len(use_count) > 1:
return True
# If we see a file system, then its used
_, check_fstype, _ = check_fs(device)
if check_fstype:
return True
return False
def get_hdd_size(device):
"""
Returns the hard disk size.
This works with any disk type, including GPT.
"""
size_cmd = [SFDISK_CMD, '--show-size', device]
size = None
try:
size, _err = util.subp(size_cmd)
except Exception as e:
raise Exception("Failed to get %s size\n%s" % (device, e))
return int(size.strip())
def get_dyn_func(*args):
"""
Call the appropriate function.
The first value is the template for function name
The second value is the template replacement
    The remaining values are passed to the function
For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
would call "foo_bar" with args of 1, 2, 3
"""
if len(args) < 2:
raise Exception("Unable to determine dynamic funcation name")
func_name = (args[0] % args[1])
func_args = args[2:]
try:
if func_args:
return globals()[func_name](*func_args)
else:
return globals()[func_name]
except KeyError:
raise Exception("No such function %s to call!" % func_name)
def check_partition_mbr_layout(device, layout):
"""
Returns true if the partition layout matches the one on the disk
Layout should be a list of values. At this time, this only
verifies that the number of partitions and their labels is correct.
"""
read_parttbl(device)
prt_cmd = [SFDISK_CMD, "-l", device]
try:
out, _err = util.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Error running partition command on %s\n%s" % (
device, e))
found_layout = []
for line in out.splitlines():
_line = line.split()
if len(_line) == 0:
continue
if device in _line[0]:
# We don't understand extended partitions yet
if _line[-1].lower() in ['extended', 'empty']:
continue
# Find the partition types
type_label = None
for x in sorted(range(1, len(_line)), reverse=True):
if _line[x].isdigit() and _line[x] != '/':
type_label = _line[x]
break
found_layout.append(type_label)
if isinstance(layout, bool):
# if we are using auto partitioning, or "True" be happy
# if a single partition exists.
if layout and len(found_layout) >= 1:
return True
return False
else:
if len(found_layout) != len(layout):
return False
else:
# This just makes sure that the number of requested
# partitions and the type labels are right
for x in range(1, len(layout) + 1):
if isinstance(layout[x - 1], tuple):
                    _, part_type = layout[x - 1]
                    if int(found_layout[x - 1]) != int(part_type):
return False
return True
return False
def check_partition_layout(table_type, device, layout):
"""
    See if the partition layout matches.
    This is a future-proofing function. In order
to add support for other disk layout schemes, add a
function called check_partition_%s_layout
"""
return get_dyn_func("check_partition_%s_layout", table_type, device,
layout)
def get_partition_mbr_layout(size, layout):
"""
Calculate the layout of the partition table. Partition sizes
are defined as percentage values or a tuple of percentage and
partition type.
For example:
        [ 33, [66, 82] ]
Defines the first partition to be a size of 1/3 the disk,
while the remaining 2/3's will be of type Linux Swap.
"""
if not isinstance(layout, list) and isinstance(layout, bool):
# Create a single partition
return "0,"
if ((len(layout) == 0 and isinstance(layout, list)) or
not isinstance(layout, list)):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
if last_part_num > 4:
raise Exception("Only simply partitioning is allowed.")
part_definition = []
part_num = 0
for part in layout:
part_type = 83 # Default to Linux
percent = part
part_num += 1
if isinstance(part, list):
if len(part) != 2:
raise Exception("Partition was incorrectly defined: %s" % part)
percent, part_type = part
part_size = int((float(size) * (float(percent) / 100)) / 1024)
if part_num == last_part_num:
part_definition.append(",,%s" % part_type)
else:
part_definition.append(",%s,%s" % (part_size, part_type))
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
raise Exception("Calculated partition definition is too big\n%s" %
sfdisk_definition)
return sfdisk_definition
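# --- Illustrative sketch (not part of upstream cloud-init): turning a
# percentage layout into an sfdisk definition. The disk size is invented and
# is in the 1024-byte units reported by `sfdisk --show-size`. ---
def _example_mbr_layout():
    size = 1024 * 1024  # pretend 1 GiB disk
    # first partition: 33% Linux (type 83); remainder: swap (type 82)
    return get_partition_mbr_layout(size, [33, [66, 82]])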
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
null = '\0' # pylint: disable=W1401
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
fp.write(null * (start_len))
fp.seek(-end_len, os.SEEK_END)
fp.write(null * end_len)
fp.flush()
read_parttbl(device)
def purge_disk(device):
"""
    Remove partition table entries
"""
# wipe any file systems first
for d in enumerate_disk(device):
if d['type'] not in ["disk", "crypt"]:
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
LOG.info("Purging filesystem on /dev/%s" % d['name'])
util.subp(wipefs_cmd)
except Exception:
raise Exception("Failed FS purge of /dev/%s" % d['name'])
purge_disk_ptable(device)
def get_partition_layout(table_type, size, layout):
"""
Call the appropriate function for creating the table
definition. Returns the table definition
This is a future proofing function. To add support for
other layouts, simply add a "get_partition_%s_layout"
function.
"""
return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
def read_parttbl(device):
"""
    Use 'blockdev --rereadpt', bracketed by 'udevadm settle', to make the
    kernel re-read the partition table reliably.
"""
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
udev_cmd = [UDEVADM_CMD, 'settle']
try:
util.subp(udev_cmd)
util.subp(blkdev_cmd)
util.subp(udev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
def exec_mkpart_mbr(device, layout):
"""
Break out of mbr partition to allow for future partition
types, i.e. gpt
"""
# Create the partitions
prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
try:
util.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Failed to partition device %s\n%s" % (device, e))
read_parttbl(device)
def exec_mkpart(table_type, device, layout):
"""
Fetches the function for creating the table type.
    This allows us to dynamically find which function to call.
    Parameters:
table_type: type of partition table to use
device: the device to work on
layout: layout definition specific to partition table
"""
return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
def mkpart(device, definition):
"""
Creates the partition table.
Parameters:
definition: dictionary describing how to create the partition.
The following are supported values in the dict:
overwrite: Should the partition table be created regardless
                       of any pre-existing data?
layout: the layout of the partition table
table_type: Which partition table to use, defaults to MBR
device: the device to work on.
"""
LOG.debug("Checking values for %s definition" % device)
overwrite = definition.get('overwrite', False)
layout = definition.get('layout', False)
table_type = definition.get('table_type', 'mbr')
# Check if the default device is a partition or not
LOG.debug("Checking against default devices")
if (isinstance(layout, bool) and not layout) or not layout:
LOG.debug("Device is not to be partitioned, skipping")
return # Device is not to be partitioned
# This prevents you from overwriting the device
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
raise Exception("Device %s is not a disk device!", device)
# Remove the partition table entries
if isinstance(layout, str) and layout.lower() == "remove":
LOG.debug("Instructed to remove partition table entries")
purge_disk(device)
return
LOG.debug("Checking if device layout matches")
if check_partition_layout(table_type, device, layout):
LOG.debug("Device partitioning layout matches")
return True
LOG.debug("Checking if device is safe to partition")
if not overwrite and (is_disk_used(device) or is_filesystem(device)):
LOG.debug("Skipping partitioning on configured device %s" % device)
return
LOG.debug("Checking for device size")
device_size = get_hdd_size(device)
LOG.debug("Calculating partition layout")
part_definition = get_partition_layout(table_type, device_size, layout)
LOG.debug(" Layout is: %s" % part_definition)
LOG.debug("Creating partition table on %s", device)
exec_mkpart(table_type, device, part_definition)
LOG.debug("Partition table created for %s", device)
def lookup_force_flag(fs):
"""
    A force flag might be -F or -f; this looks it up
"""
flags = {'ext': '-F',
'btrfs': '-f',
'xfs': '-f',
'reiserfs': '-f',
}
if 'ext' in fs.lower():
fs = 'ext'
if fs.lower() in flags:
return flags[fs]
LOG.warn("Force flag for %s is unknown." % fs)
return ''
def mkfs(fs_cfg):
"""
Create a file system on the device.
label: defines the label to use on the device
fs_cfg: defines how the filesystem is to look
The following values are required generally:
device: which device or cloud defined default_device
filesystem: which file system type
overwrite: indiscriminately create the file system
partition: when device does not define a partition,
setting this to a number will mean
device + partition. When set to 'auto', the
first free device or the first device which
matches both label and type will be used.
'any' means the first filesystem that matches
on the device.
When 'cmd' is provided then no other parameter is required.
"""
label = fs_cfg.get('label')
device = fs_cfg.get('device')
partition = str(fs_cfg.get('partition', 'any'))
fs_type = fs_cfg.get('filesystem')
fs_cmd = fs_cfg.get('cmd', [])
fs_opts = fs_cfg.get('extra_opts', [])
fs_replace = fs_cfg.get('replace_fs', False)
overwrite = fs_cfg.get('overwrite', False)
# This allows you to define the default ephemeral or swap
LOG.debug("Checking %s against default devices", device)
if not partition or partition.isdigit():
# Handle manual definition of partition
if partition.isdigit():
device = "%s%s" % (device, partition)
LOG.debug("Manual request of partition %s for %s",
partition, device)
# Check to see if the fs already exists
LOG.debug("Checking device %s", device)
check_label, check_fstype, _ = check_fs(device)
LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
if check_label == label and check_fstype == fs_type:
LOG.debug("Existing file system found at %s", device)
if not overwrite:
LOG.debug("Device %s has required file system", device)
return
else:
LOG.warn("Destroying filesystem on %s", device)
else:
LOG.debug("Device %s is cleared for formating", device)
elif partition and str(partition).lower() in ('auto', 'any'):
# For auto devices, we match if the filesystem does exist
odevice = device
LOG.debug("Identifying device to create %s filesytem on", label)
# any mean pick the first match on the device with matching fs_type
label_match = True
if partition.lower() == 'any':
label_match = False
device, reuse = find_device_node(device, fs_type=fs_type, label=label,
label_match=label_match,
replace_fs=fs_replace)
LOG.debug("Automatic device for %s identified as %s", odevice, device)
if reuse:
LOG.debug("Found filesystem match, skipping formating.")
return
if not reuse and fs_replace and device:
LOG.debug("Replacing file system on %s as instructed." % device)
if not device:
LOG.debug("No device aviable that matches request. "
"Skipping fs creation for %s", fs_cfg)
return
elif not partition or str(partition).lower() == 'none':
LOG.debug("Using the raw device to place filesystem %s on" % label)
else:
LOG.debug("Error in device identification handling.")
return
LOG.debug("File system %s will be created on %s", label, device)
# Make sure the device is defined
if not device:
LOG.warn("Device is not known: %s", device)
return
# Check that we can create the FS
if not (fs_type or fs_cmd):
raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
"must be set.", label)
# Create the commands
if fs_cmd:
fs_cmd = fs_cfg['cmd'] % {'label': label,
'filesystem': fs_type,
'device': device,
}
else:
# Find the mkfs command
mkfs_cmd = util.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
mkfs_cmd = util.which("mk%s" % fs_type)
if not mkfs_cmd:
LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
fs_type)
return
fs_cmd = [mkfs_cmd, device]
if label:
fs_cmd.extend(["-L", label])
        # Add the force flag for file systems that support it
        if overwrite or device_type(device) == "disk":
fs_cmd.append(lookup_force_flag(fs_type))
        # Add the extra FS options
if fs_opts:
fs_cmd.extend(fs_opts)
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", " ".join(fs_cmd))
try:
util.subp(fs_cmd)
except Exception as e:
raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
| gpl-3.0 |
DESHRAJ/fjord | vendor/packages/translate-toolkit/translate/tools/test_pretranslate.py | 3 | 14236 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
from pytest import mark
from translate.tools import pretranslate
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import po
from translate.storage import xliff
class TestPretranslate:
xliff_skeleton = '''<?xml version="1.0" encoding="utf-8"?>
<xliff version="1.1" xmlns="urn:oasis:names:tc:xliff:document:1.1">
<file original="doc.txt" source-language="en-US">
<body>
%s
</body>
</file>
</xliff>'''
def setup_method(self, method):
warnings.resetwarnings()
def teardown_method(self, method):
warnings.resetwarnings()
def pretranslatepo(self, input_source, template_source=None):
"""helper that converts strings to po source without requiring files"""
input_file = wStringIO.StringIO(input_source)
if template_source:
template_file = wStringIO.StringIO(template_source)
else:
template_file = None
output_file = wStringIO.StringIO()
pretranslate.pretranslate_file(input_file, output_file, template_file)
output_file.seek(0)
return po.pofile(output_file.read())
def pretranslatexliff(self, input_source, template_source=None):
"""helper that converts strings to po source without requiring files"""
input_file = wStringIO.StringIO(input_source)
if template_source:
template_file = wStringIO.StringIO(template_source)
else:
template_file = None
output_file = wStringIO.StringIO()
pretranslate.pretranslate_file(input_file, output_file, template_file)
output_file.seek(0)
return xliff.xlifffile(output_file.read())
def singleunit(self, pofile):
"""checks that the pofile contains a single non-header unit, and
returns it"""
if len(pofile.units) == 2 and pofile.units[0].isheader():
print pofile.units[1]
return pofile.units[1]
else:
print pofile.units[0]
return pofile.units[0]
def test_pretranslatepo_blank(self):
"""checks that the pretranslatepo function is working for a simple file
initialisation"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
newpo = self.pretranslatepo(input_source)
assert str(self.singleunit(newpo)) == input_source
def test_merging_simple(self):
"""checks that the pretranslatepo function is working for a simple
merge"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == template_source
def test_merging_messages_marked_fuzzy(self):
"""test that when we merge PO files with a fuzzy message that it
remains fuzzy"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\n#, fuzzy\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == template_source
def test_merging_plurals_with_fuzzy_matching(self):
"""test that when we merge PO files with a fuzzy message that it
remains fuzzy"""
input_source = r'''#: file.cpp:2
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] ""
msgstr[1] ""
'''
template_source = r'''#: file.cpp:3
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
# The #: comment and msgid's are different between the pot and the po
poexpected = r'''#: file.cpp:2
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == poexpected
@mark.xfail(reason="Not Implemented")
def test_merging_msgid_change(self):
"""tests that if the msgid changes but the location stays the same that
we merge"""
input_source = '''#: simple.label\n#: simple.accesskey\nmsgid "Its &hard coding a newline.\\n"\nmsgstr ""\n'''
template_source = '''#: simple.label\n#: simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
poexpected = '''#: simple.label\n#: simple.accesskey\n#, fuzzy\nmsgid "Its &hard coding a newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
def test_merging_location_change(self):
"""tests that if the location changes but the msgid stays the same that
we merge"""
input_source = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
poexpected = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
def test_merging_location_and_whitespace_change(self):
"""test that even if the location changes that if the msgid only has
whitespace changes we can still merge"""
input_source = '''#: singlespace.label%ssinglespace.accesskey\nmsgid "&We have spaces"\nmsgstr ""\n''' % po.lsep
template_source = '''#: doublespace.label%sdoublespace.accesskey\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
poexpected = '''#: singlespace.label%ssinglespace.accesskey\n#, fuzzy\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
@mark.xfail(reason="Not Implemented")
def test_merging_accelerator_changes(self):
"""test that a change in the accelerator localtion still allows
merging"""
input_source = '''#: someline.c\nmsgid "A&bout"\nmsgstr ""\n'''
template_source = '''#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
poexpected = '''#: someline.c\nmsgid "A&bout"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
@mark.xfail(reason="Not Implemented")
def test_lines_cut_differently(self):
"""Checks that the correct formatting is preserved when pot an po lines
differ."""
input_source = '''#: simple.label\nmsgid "Line split "\n"differently"\nmsgstr ""\n'''
template_source = '''#: simple.label\nmsgid "Line"\n" split differently"\nmsgstr "Lyne verskillend gesny"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_automatic_comments_dont_duplicate(self):
"""ensure that we can merge #. comments correctly"""
input_source = '''#. Row 35\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#. Row 35\nmsgid "&About"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_automatic_comments_new_overides_old(self):
"""ensure that new #. comments override the old comments"""
input_source = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#. old comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
poexpected = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_merging_comments_with_blank_comment_lines(self):
"""test that when we merge a comment that has a blank line we keep the
blank line"""
input_source = '''#: someline.c\nmsgid "About"\nmsgstr ""\n'''
template_source = '''# comment1\n#\n# comment2\n#: someline.c\nmsgid "About"\nmsgstr "Omtrent"\n'''
poexpected = template_source
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_empty_commentlines(self):
input_source = '''#: paneSecurity.title
msgid "Security"
msgstr ""
'''
template_source = '''# - Contributor(s):
# -
# - Alternatively, the
# -
#: paneSecurity.title
msgid "Security"
msgstr "Sekuriteit"
'''
poexpected = template_source
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
print "expected"
print poexpected
print "got:"
print str(newpounit)
assert str(newpounit) == poexpected
def test_merging_msgidcomments(self):
"""ensure that we can merge msgidcomments messages"""
input_source = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr ""
'''
template_source = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr "36em"
'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_plurals(self):
"""ensure that we can merge plural messages"""
input_source = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] ""\nmsgstr[1] ""\n'''
template_source = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] "Een"\nmsgstr[1] "Twee"\nmsgstr[2] "Drie"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_resurect_obsolete_messages(self):
"""check that we can reuse old obsolete messages if the message comes
back"""
input_source = '''#: resurect.c\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#~ msgid "&About"\n#~ msgstr "&Omtrent"\n'''
expected = '''#: resurect.c\nmsgid "&About"\nmsgstr "&Omtrent"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == expected
def test_merging_comments(self):
"""Test that we can merge comments correctly"""
input_source = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr ""\n'''
template_source = '''#. Don't do it!\n#: file.py:2\nmsgid "One"\nmsgstr "Een"\n'''
poexpected = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr "Een"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_merging_typecomments(self):
"""Test that we can merge with typecomments"""
input_source = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr ""\n'''
template_source = '''#: file.c:2\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
poexpected = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
print newpounit
assert str(newpounit) == poexpected
input_source = '''#: file.c:1\n#, c-format\nmsgid "%d computers"\nmsgstr ""\n'''
template_source = '''#: file.c:2\n#, c-format\nmsgid "%s computers "\nmsgstr "%s-rekenaars"\n'''
poexpected = '''#: file.c:1\n#, fuzzy, c-format\nmsgid "%d computers"\nmsgstr "%s-rekenaars"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert newpounit.isfuzzy()
assert newpounit.hastypecomment("c-format")
def test_xliff_states(self):
"""Test correct maintenance of XLIFF states."""
xlf_template = self.xliff_skeleton \
% '''<trans-unit id="1" xml:space="preserve">
<source> File 1 </source>
</trans-unit>'''
xlf_old = self.xliff_skeleton \
% '''<trans-unit id="1" xml:space="preserve" approved="yes">
<source> File 1 </source>
<target> Lêer 1 </target>
</trans-unit>'''
template = xliff.xlifffile.parsestring(xlf_template)
old = xliff.xlifffile.parsestring(xlf_old)
new = self.pretranslatexliff(template, old)
print str(old)
print '---'
print str(new)
assert new.units[0].isapproved()
# Layout might have changed, so we won't compare the serialised
# versions
class TestPretranslateCommand(test_convert.TestConvertCommand, TestPretranslate):
"""Tests running actual pretranslate commands on files"""
convertmodule = pretranslate
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "--tm")
options = self.help_check(options, "-s MIN_SIMILARITY, --similarity=MIN_SIMILARITY")
options = self.help_check(options, "--nofuzzymatching", last=True)
| bsd-3-clause |
lombritz/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/compile_all.py | 384 | 1193 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import compileall
compileall.compile_dir('package')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40423136/2016fallcadp_hw | plugin/liquid_tags/audio.py | 277 | 2161 | """
Audio Tag
---------
This implements a Liquid-style audio tag for Pelican,
based on the pelican video plugin [1]_
Syntax
------
{% audio url/to/audio [url/to/audio] [/url/to/audio] %}
Example
-------
{% audio http://example.tld/foo.mp3 http://example.tld/foo.ogg %}
Output
------
<audio controls><source src="http://example.tld/foo.mp3" type="audio/mpeg"><source src="http://example.tld/foo.ogg" type="audio/ogg">Your browser does not support the audio element.</audio>
[1] https://github.com/getpelican/pelican-plugins/blob/master/liquid_tags/video.py
"""
import os
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% audio url/to/audio [url/to/audio] [/url/to/audio] %}"
AUDIO = re.compile(r'(/\S+|https?:\S+)(?:\s+(/\S+|https?:\S+))?(?:\s+(/\S+|https?:\S+))?')
AUDIO_TYPEDICT = {'.mp3': 'audio/mpeg',
'.ogg': 'audio/ogg',
'.oga': 'audio/ogg',
'.opus': 'audio/ogg',
'.wav': 'audio/wav',
'.mp4': 'audio/mp4'}
def create_html(markup):
match = AUDIO.search(markup)
if match:
groups = match.groups()
audio_files = [g for g in groups if g]
if any(audio_files):
audio_out = '<audio controls>'
for audio_file in audio_files:
base, ext = os.path.splitext(audio_file)
if ext not in AUDIO_TYPEDICT:
raise ValueError("Unrecognized audio extension: "
"{0}".format(ext))
# add audio source
audio_out += '<source src="{}" type="{}">'.format(
audio_file, AUDIO_TYPEDICT[ext])
# close audio tag
audio_out += 'Your browser does not support the audio element.'
audio_out += '</audio>'
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return audio_out
@LiquidTags.register('audio')
def audio(preprocessor, tag, markup):
return create_html(markup)
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
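# Minimal usage sketch (not part of the original plugin): create_html() can be
# exercised directly with a tag body like the one in the module docstring; the
# URLs below are placeholders.
#
#   html = create_html("http://example.tld/foo.mp3 http://example.tld/foo.ogg")
#   # html -> '<audio controls><source src="http://example.tld/foo.mp3" ...</audio>'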
| agpl-3.0 |
caisq/tensorflow | tensorflow/contrib/gan/python/features/python/random_tensor_pool.py | 55 | 1552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tensor pool stores values from an input tensor and returns a stored one.
See the following papers for more details.
1) `Learning from simulated and unsupervised images through adversarial
training` (https://arxiv.org/abs/1612.07828).
2) `Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial
Networks` (https://arxiv.org/abs/1703.10593).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import random_tensor_pool_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = random_tensor_pool_impl.__all__
remove_undocumented(__name__, __all__)
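# Illustrative sketch (not part of the original module). The pool is typically
# used to feed a discriminator with "historical" generator outputs; the symbol
# `tensor_pool` is assumed to be among random_tensor_pool_impl.__all__, and the
# generator/discriminator callables below are placeholders.
#
#   pooled_fake = tensor_pool(generated_images, pool_size=50)
#   disc_on_pool = discriminator_fn(pooled_fake)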
| apache-2.0 |
sukwon0709/equip | equip/rewriter/simple.py | 1 | 11974 | # -*- coding: utf-8 -*-
"""
equip.rewriter.simple
~~~~~~~~~~~~~~~~~~~~~
A simplified interface (yet the main one) to handle the injection
of instrumentation code.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import os
import copy
from ..utils.log import logger
from ..bytecode.decl import ModuleDeclaration, \
MethodDeclaration, \
TypeDeclaration
from ..bytecode.code import BytecodeObject
from ..bytecode.utils import show_bytecode, \
get_debug_code_object_info
from .merger import Merger, RETURN_CANARY_NAME, LOAD_GLOBAL
# A global tracking what file we added the imports to. This should be refactored
# and we should inspect the module/method for imports.
GLOBAL_IMPORTS_ADDED = set()
EXIT_ENTER_CODE_TEMPLATE = """
if __name__ == '__main__':
%s
"""
class SimpleRewriter(object):
"""
The current main rewriter that works for one ``Declaration`` object. Using this
rewriter will modify the given declaration object by possibly replacing all of
  its associated code objects.
"""
#: List of the parameters that can be used for formatting the code
#: to inject.
#: The values are:
#:
#: * ``method_name``: The name of the method that is being called.
#:
#: * ``lineno``: The start line number of the declaration object being
#: instrumented.
#:
#: * ``file_name``: The file name of the current module.
#:
#: * ``class_name``: The name of the class a method belongs to.
#:
KNOWN_FIELDS = ('method_name', 'lineno', 'file_name', 'class_name',
'arg0', 'arg1', 'arg2', 'arg3', 'arg4',
'arg5', 'arg6', 'arg7', 'arg8', 'arg9',
'arg10', 'arg11', 'arg12', 'arg13', 'arg14',
'arguments', 'return_value')
def __init__(self, decl):
self.decl = decl
self.original_decl = copy.deepcopy(self.decl)
self.module = None
    if isinstance(self.decl, ModuleDeclaration):
self.module = self.decl
else:
self.module = self.decl.parent_module
self.import_lives = set()
def insert_before(self, python_code):
"""
Insert code at the beginning of the method's body.
The submitted code can be formatted using ``fields`` declared in ``KNOWN_FIELDS``.
Since ``string.format`` is used once the values are dumped, the injected code should
    be properly structured.
:param python_code: The python code to be formatted, compiled, and inserted
at the beginning of the method body.
"""
if not isinstance(self.decl, MethodDeclaration):
raise TypeError('Can only insert before/after in a method')
return self.insert_generic(python_code, location=Merger.BEFORE)
def insert_after(self, python_code):
"""
Insert code at each `RETURN_VALUE` opcode. See `insert_before`.
"""
if not isinstance(self.decl, MethodDeclaration):
raise TypeError('Can only insert before/after in a method')
return self.insert_generic(python_code, location=Merger.AFTER)
def insert_generic(self, python_code, location=Merger.UNKNOWN, \
ins_lineno=-1, ins_offset=-1, ins_module=False, ins_import=False):
"""
Generic code injection utils. It first formats the supplied ``python_code``,
    compiles it to get the `code_object`, and merges this new `code_object` with
the one of the current declaration object (``decl``). The insertion is done by
the ``Merger``.
When the injection is done, this method will go and recursively update all
references to the old `code_object` in the parents (when a parent changes, it is
    updated as well and its new ``code_object`` propagated upwards). This process is
required as Python's code objects are nested in parent's code objects, and they
    are all read-only. This process breaks any references that were held on previously
used code objects (e.g., don't do that when the instrumented code is running).
:param python_code: The code to be formatted and inserted.
:param location: The kind of insertion to perform.
:param ins_lineno: When an insertion should occur at one given line of code,
use this parameter. Defaults to -1.
:param ins_offset: When an insertion should occur at one given bytecode offset,
use this parameter. Defaults to -1.
:param ins_module: Specify the code insertion should happen in the module
itself and not the current declaration.
    :param ins_import: True if the method is called for inserting an import statement.
"""
target_decl = self.decl if not ins_module else self.module
original_decl = self.original_decl
if ins_module and not isinstance(original_decl, ModuleDeclaration):
original_decl = original_decl.parent_module
formatted_code = SimpleRewriter.format_code(target_decl, python_code, location)
injected_co = SimpleRewriter.get_code_object(formatted_code)
if ins_import:
# Parse the import statement to extract the imported names.
bc_import = BytecodeObject.get_parsed_code(injected_co)
import_stmts = BytecodeObject.get_imports_from_bytecode(injected_co, bc_import)
for import_stmt in import_stmts:
self.import_lives = self.import_lives | import_stmt.live_names
self.inspect_all_globals()
working_co = target_decl.code_object
new_co = Merger.merge(working_co,
injected_co,
location,
ins_lineno,
ins_offset,
self.import_lives)
if not new_co:
return self
original_co = target_decl.code_object
target_decl.code_object = new_co
target_decl.has_changes = True
# Recursively apply this to the parent cos
parent = target_decl.parent
original_parent = original_decl.parent
while parent is not None:
# inspect the parent cos and update the consts for
# the original to the current sub-CO
parent.update_nested_code_object(original_co, new_co)
original_co = original_parent.code_object
new_co = parent.code_object
original_parent = original_parent.parent
parent = parent.parent
return self
def insert_import(self, import_code, module_import=True):
"""
Insert an import statement in the current bytecode. The import is added
in front of every other imports.
"""
logger.debug("Insert import on: %s", self.decl)
if not module_import:
return self.insert_generic(import_code, location=Merger.BEFORE, ins_import=True)
else:
global GLOBAL_IMPORTS_ADDED
if self.module.module_path in GLOBAL_IMPORTS_ADDED:
logger.debug("Already added imports in %s" % self.module.module_path)
return
self.insert_generic(import_code, location=Merger.BEFORE,
ins_module=True, ins_import=True)
GLOBAL_IMPORTS_ADDED.add(self.module.module_path)
return self
def insert_enter_code(self, python_code, import_code=None):
"""
Insert generic code at the beginning of the module. The code is wrapped
in a ``if __name__ == '__main__'`` statement.
:param python_code: The python code to compile and inject.
:param import_code: The import statements, if any, to add before the
insertion of `python_code`. Defaults to None.
"""
return self.insert_enter_exit_code(python_code,
import_code,
location=Merger.MODULE_ENTER)
def insert_exit_code(self, python_code, import_code=None):
"""
Insert generic code at the end of the module. The code is wrapped
in a ``if __name__ == '__main__'`` statement.
:param python_code: The python code to compile and inject.
:param import_code: The import statements, if any, to add before the
insertion of `python_code`. Defaults to None.
"""
return self.insert_enter_exit_code(python_code,
import_code,
location=Merger.MODULE_EXIT)
def insert_enter_exit_code(self, python_code, import_code=None, location=Merger.MODULE_EXIT):
indented_python_code = SimpleRewriter.indent(python_code, indent_level=1)
if import_code:
indented_import_code = SimpleRewriter.indent(import_code, indent_level=1)
indented_python_code = indented_import_code + '\n' + indented_python_code
new_code = EXIT_ENTER_CODE_TEMPLATE % indented_python_code
return self.insert_generic(new_code, location)
def inspect_all_globals(self):
if not self.module:
return
co_module = self.module.code_object
bc_module = BytecodeObject.get_parsed_code(co_module)
for bc_tpl in bc_module:
if bc_tpl[2] == LOAD_GLOBAL:
self.import_lives.add(bc_tpl[3])
@staticmethod
def indent(original_code, indent_level=0):
"""
Lousy helper that indents the supplied python code, so that it will fit under
an if statement.
"""
new_code = []
indent = ' ' * 4 * indent_level
for l in original_code.split('\n'):
new_code.append(indent + l)
return '\n'.join(new_code)
@staticmethod
def get_code_object(python_code):
"""
    Actually compiles the supplied code and returns the ``code_object`` to be
merged with the source ``code_object``.
:param python_code: The python code to compile.
"""
try:
co = compile(python_code, '<string>', 'exec')
return co
except Exception, ex:
logger.error(str(ex))
logger.error('Compilation error:\n%s', python_code)
return None
# We know of some fields in KNOWN_FIELDS, and we inject them
# using the format string
@staticmethod
def format_code(decl, python_code, location):
"""
Formats the supplied ``python_code`` with format string, and values listed
in `KNOWN_FIELDS`.
:param decl: The declaration object (e.g., ``MethodDeclaration``, ``TypeDeclaration``, etc.).
:param python_code: The python code to format.
:param location: The kind of insertion to perform (e.g., ``Merger.BEFORE``).
"""
values = SimpleRewriter.get_formatting_values(decl, location)
return python_code.format(**values)
@staticmethod
def get_formatting_values(decl, location):
"""
Retrieves the dynamic values to be added in the format string. All values
are statically computed, but formal parameters (of methods) are passed by name so
it is possible to dereference them in the inserted code (same for the return value).
:param decl: The declaration object.
:param location: The kind of insertion to perform (e.g., ``Merger.BEFORE``).
"""
values = {}
values['lineno'] = decl.start_lineno
values['file_name'] = os.path.basename(decl.parent_module.module_path) \
if not isinstance(decl, ModuleDeclaration) \
else decl.module_path
values['class_name'] = decl.parent_class.type_name \
if decl.parent_class is not None \
else None
# Method specific arguments
if isinstance(decl, MethodDeclaration):
values['method_name'] = decl.method_name
values['arguments'] = ', '.join(decl.formal_parameters) if decl.formal_parameters else None
values['return_value'] = RETURN_CANARY_NAME if location == Merger.AFTER else None
args = decl.formal_parameters
length = len(args)
for arg_cnt in range(15):
if arg_cnt >= length:
values['arg%d' % arg_cnt] = None
else:
values['arg%d' % arg_cnt] = args[arg_cnt]
return values
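# Usage sketch (illustrative only): fields listed in KNOWN_FIELDS are substituted
# via str.format before compilation, so instrumentation snippets can reference
# them directly. `method_decl` stands for a MethodDeclaration obtained from a
# parsed BytecodeObject; it is not defined in this module.
#
#   rewriter = SimpleRewriter(method_decl)
#   rewriter.insert_before(
#       "print 'enter {method_name} ({file_name}:{lineno}) args: {arguments}'")
#   rewriter.insert_after(
#       "print 'exit {method_name} -> %r' % ({return_value},)")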
| apache-2.0 |
ioos/compliance-checker | compliance_checker/tests/test_feature_detection.py | 2 | 31793 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
compliance_checker/tests/test_feature_detection.py
"""
from unittest import TestCase
from netCDF4 import Dataset
from compliance_checker import cfutil as util
from compliance_checker.tests import resources
from compliance_checker.tests.helpers import MockRaggedArrayRepr
class TestFeatureDetection(TestCase):
"""
    Tests the feature type detection of cfutil
"""
def test_point(self):
"""
Ensures point detection works
"""
with Dataset(resources.STATIC_FILES["point"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_point(nc, variable), "{} is point".format(variable)
def test_timeseries(self):
"""
Ensures timeseries detection works
"""
with Dataset(resources.STATIC_FILES["timeseries"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries(nc, variable), "{} is timeseries".format(
variable
)
def test_multi_timeseries_orthogonal(self):
"""
Ensures multi-timeseries-orthogonal detection works
"""
with Dataset(resources.STATIC_FILES["multi-timeseries-orthogonal"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_multi_timeseries_orthogonal(
nc, variable
), "{} is multi-timeseries orthogonal".format(variable)
def test_multi_timeseries_incomplete(self):
"""
Ensures multi-timeseries-incomplete detection works
"""
with Dataset(resources.STATIC_FILES["multi-timeseries-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_multi_timeseries_incomplete(
nc, variable
), "{} is multi-timeseries incomplete".format(variable)
def test_trajectory(self):
"""
Ensures trajectory detection works
"""
with Dataset(resources.STATIC_FILES["trajectory"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_cf_trajectory(nc, variable), "{} is trajectory".format(
variable
)
def test_trajectory_single(self):
"""
Ensures trajectory-single detection works
"""
with Dataset(resources.STATIC_FILES["trajectory-single"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_single_trajectory(
nc, variable
), "{} is trajectory-single".format(variable)
def test_profile_orthogonal(self):
"""
Ensures profile-orthogonal detection works
"""
with Dataset(resources.STATIC_FILES["profile-orthogonal"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_profile_orthogonal(
nc, variable
), "{} is profile-orthogonal".format(variable)
def test_profile_incomplete(self):
"""
Ensures profile-incomplete detection works
"""
with Dataset(resources.STATIC_FILES["profile-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_profile_incomplete(
nc, variable
), "{} is profile-incomplete".format(variable)
def test_timeseries_profile_single_station(self):
"""
Ensures timeseries profile single station detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-single-station"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_single_station(
nc, variable
), "{} is timeseries-profile-single-station".format(variable)
def test_timeseries_profile_multi_station(self):
"""
Ensures timeseries profile multi station detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-multi-station"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_multi_station(
nc, variable
), "{} is timeseries-profile-multi-station".format(variable)
def test_timeseries_profile_single_ortho_time(self):
"""
Ensures timeseries profile single station ortho time detection works
"""
with Dataset(
resources.STATIC_FILES["timeseries-profile-single-ortho-time"]
) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_single_ortho_time(
nc, variable
), "{} is timeseries-profile-single-ortho-time".format(variable)
def test_timeseries_profile_multi_ortho_time(self):
"""
Ensures timeseries profile multi station ortho time detection works
"""
with Dataset(
resources.STATIC_FILES["timeseries-profile-multi-ortho-time"]
) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_multi_ortho_time(
nc, variable
), "{} is timeseries-profile-multi-ortho-time".format(variable)
def test_timeseries_profile_ortho_depth(self):
"""
Ensures timeseries profile ortho depth detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-ortho-depth"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_ortho_depth(
nc, variable
), "{} is timeseries-profile-ortho-depth".format(variable)
def test_timeseries_profile_incomplete(self):
"""
Ensures timeseries profile station incomplete detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_incomplete(
nc, variable
), "{} is timeseries-profile-incomplete".format(variable)
def test_trajectory_profile_orthogonal(self):
"""
Ensures trajectory profile orthogonal detection works
"""
with Dataset(resources.STATIC_FILES["trajectory-profile-orthogonal"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_trajectory_profile_orthogonal(
nc, variable
), "{} is trajectory profile orthogonal".format(variable)
def test_trajectory_profile_incomplete(self):
"""
Ensures trajectory profile incomplete detection works
"""
with Dataset(resources.STATIC_FILES["trajectory-profile-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_trajectory_profile_incomplete(
nc, variable
), "{} is trajectory profile incomplete".format(variable)
def test_2d_regular_grid(self):
"""
Ensures 2D Regular Grid detection works
"""
with Dataset(resources.STATIC_FILES["2d-regular-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_2d_regular_grid(
nc, variable
), "{} is 2D regular grid".format(variable)
def test_2d_static_grid(self):
"""
Ensures 2D Static Grid detection works
"""
with Dataset(resources.STATIC_FILES["2d-static-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_2d_static_grid(
nc, variable
), "{} is a 2D static grid".format(variable)
def test_3d_regular_grid(self):
"""
        Ensures 3D Regular Grid detection works
"""
with Dataset(resources.STATIC_FILES["3d-regular-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_3d_regular_grid(
nc, variable
), "{} is 3d regular grid".format(variable)
def test_3d_static_grid(self):
"""
Ensures 3D Static Grid detection works
"""
with Dataset(resources.STATIC_FILES["3d-static-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_3d_static_grid(
nc, variable
), "{} is a 3D static grid".format(variable)
def test_boundaries(self):
"""
Ensures that boundary variables are not listed as geophysical variables
"""
with Dataset(resources.STATIC_FILES["grid-boundaries"]) as nc:
assert "lat_bnds" not in util.get_geophysical_variables(nc)
assert "lon_bnds" not in util.get_geophysical_variables(nc)
assert "lat_bnds" in util.get_cell_boundary_variables(nc)
assert "lon_bnds" in util.get_cell_boundary_variables(nc)
boundary_map = util.get_cell_boundary_map(nc)
assert boundary_map["lat"] == "lat_bnds"
assert boundary_map["lon"] == "lon_bnds"
def test_climatology(self):
"""
Ensures that climatology variables are identified as climatology variables and not geophysical variables
"""
with Dataset(resources.STATIC_FILES["climatology"]) as nc:
geophysical_variables = util.get_geophysical_variables(nc)
climatology_variable = util.get_climatology_variable(nc)
assert "temperature" in geophysical_variables
assert "climatology_bounds" not in geophysical_variables
assert "climatology_bounds" == climatology_variable
def test_grid_mapping(self):
"""
Ensures that grid mapping variables are properly identified
"""
with Dataset(resources.STATIC_FILES["rotated_pole_grid"]) as nc:
grid_mapping = util.get_grid_mapping_variables(nc)
coordinate_variables = util.get_coordinate_variables(nc)
axis_variables = util.get_axis_variables(nc)
assert "rotated_pole" in grid_mapping
assert set(["rlon", "rlat", "lev"]) == set(coordinate_variables)
assert set(["rlon", "rlat", "lev"]) == set(axis_variables)
assert "lat" == util.get_lat_variable(nc)
assert "lon" == util.get_lon_variable(nc)
def test_auxiliary_coordinates(self):
"""
Ensures variables are classified as auxiliary coordinate variables
"""
with Dataset(resources.STATIC_FILES["bad_units"]) as nc:
coordinate_variables = util.get_coordinate_variables(nc)
assert set(["time"]) == set(coordinate_variables)
aux_coord_vards = util.get_auxiliary_coordinate_variables(nc)
assert set(["lat", "lon"]) == set(aux_coord_vards)
def test_forecast_reference_metadata(self):
"""
Tests variables used for forecast reference metadata to ensure they are
not misclassified as geophysical variables.
"""
with Dataset(resources.STATIC_FILES["forecast_reference"]) as nc:
self.assertFalse(util.is_geophysical(nc, "forecast_reference_time"))
self.assertFalse(util.is_geophysical(nc, "forecast_hour"))
self.assertTrue(util.is_geophysical(nc, "air_temp"))
self.assertFalse(util.is_geophysical(nc, "time"))
assert len(util.get_coordinate_variables(nc)) == 3
assert len(util.get_geophysical_variables(nc)) == 1
def test_rotated_pole_grid(self):
with Dataset(resources.STATIC_FILES["rotated_pole_grid"]) as nc:
latitudes = util.get_latitude_variables(nc)
assert latitudes == ["lat", "rlat"]
assert util.is_mapped_grid(nc, "temperature") is True
def test_vertical_coords(self):
with Dataset(resources.STATIC_FILES["vertical_coords"]) as nc:
vertical = util.get_z_variables(nc)
assert vertical == ["height"]
def test_reduced_grid(self):
with Dataset(resources.STATIC_FILES["reduced_horizontal_grid"]) as nc:
assert util.guess_feature_type(nc, "PS") == "reduced-grid"
def test_global_feature_detection(self):
with Dataset(resources.STATIC_FILES["reduced_horizontal_grid"]) as nc:
assert util.guess_feature_type(nc, "PS") == "reduced-grid"
with Dataset(resources.STATIC_FILES["vertical_coords"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "point"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["Z"] == ["height"]
assert axis_map["T"] == ["time"]
with Dataset(resources.STATIC_FILES["2d-regular-grid"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "2d-regular-grid"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["z"]
assert axis_map["X"] == ["lon"]
assert axis_map["Y"] == ["lat"]
with Dataset(resources.STATIC_FILES["2dim"]) as nc:
assert util.guess_feature_type(nc, "T") == "mapped-grid"
axis_map = util.get_axis_map(nc, "T")
assert axis_map["Z"] == ["lev"]
assert axis_map["Y"] == ["yc", "lat"]
assert axis_map["X"] == ["xc", "lon"]
with Dataset(resources.STATIC_FILES["3d-regular-grid"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "3d-regular-grid"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["z"]
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["climatology"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "timeseries"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == []
assert axis_map["Y"] == []
assert axis_map["X"] == []
with Dataset(resources.STATIC_FILES["index_ragged"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "trajectory"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["z"]
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["mapping"]) as nc:
assert (
util.guess_feature_type(nc, "sea_surface_height")
== "timeseries"
)
axis_map = util.get_axis_map(nc, "sea_surface_height")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == []
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["rotated_pole_grid"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "mapped-grid"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == []
assert axis_map["Z"] == ["lev"]
assert axis_map["Y"] == ["rlat", "lat"]
assert axis_map["X"] == ["rlon", "lon"]
with Dataset(resources.STATIC_FILES["rutgers"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "trajectory"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["depth"]
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["self-referencing-var"]) as nc:
assert util.guess_feature_type(nc, "TEMP") == "point"
axis_map = util.get_axis_map(nc, "TEMP")
assert axis_map["T"] == ["TIME"]
assert axis_map["Z"] == ["DEPTH"]
assert axis_map["Y"] == []
assert axis_map["X"] == []
with Dataset(resources.STATIC_FILES["2d-static-grid"]) as nc:
assert util.guess_feature_type(nc, "T") == "2d-static-grid"
axis_map = util.get_axis_map(nc, "T")
assert axis_map["X"] == ["lon"]
assert axis_map["Y"] == ["lat"]
assert axis_map["T"] == []
assert axis_map["Z"] == []
with Dataset(resources.STATIC_FILES["3d-static-grid"]) as nc:
assert util.guess_feature_type(nc, "T") == "3d-static-grid"
axis_map = util.get_axis_map(nc, "T")
assert axis_map["X"] == ["lon"]
assert axis_map["Y"] == ["lat"]
assert axis_map["T"] == []
assert axis_map["Z"] == ["depth"]
def test_is_variable_valid_ragged_array_repr_featureType(self):
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
# add a variable that isn't recognized as geophysical
v = nc.createVariable(
"data1",
"d",
("SAMPLE_DIMENSION",),
fill_value=None
)
v.setncattr("cf_role", "blah")
self.assertFalse(util.is_variable_valid_ragged_array_repr_featureType(nc, "data1"))
# add geophysical variable with correct dimension
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"data1",
"d",
("SAMPLE_DIMENSION",),
fill_value=None
)
v.setncattr("standard_name", "sea_water_pressure")
# test the variable
self.assertTrue(util.is_variable_valid_ragged_array_repr_featureType(nc, "data1"))
# add good variable and another variable, this time with the improper dimension
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"data1",
"d",
("SAMPLE_DIMENSION",),
fill_value=None
)
v.setncattr("standard_name", "sea_water_pressure")
v2 = nc.createVariable(
"data2",
"d",
("INSTANCE_DIMENSION",),
fill_value=None
)
v2.setncattr("standard_name", "sea_water_salinity")
# good variable should pass, second should fail
self.assertTrue(util.is_variable_valid_ragged_array_repr_featureType(nc, "data1"))
self.assertFalse(util.is_variable_valid_ragged_array_repr_featureType(nc, "data2"))
def test_is_dataset_valid_ragged_array_repr_featureType(self):
# first test single featureType
# ----- timeseries, indexed ----- #
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# we'll add another cf_role variable
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# we'll add another index variable, also bad
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"index_var2",
"i",
("SAMPLE_DIMENSION",),
fill_value=None)
v.setncattr("instance_dimension", "INSTANCE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# ----- timeseries, contiguous ----- #
nc = MockRaggedArrayRepr(
"timeseries",
"contiguous"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# add another cf_role var, bad
nc = MockRaggedArrayRepr(
"timeseries",
"contiguous"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# add another count variable, bad
v = nc.createVariable(
"count_var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("sample_dimension", "SAMPLE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# ----- profile, indexed ----- #
nc = MockRaggedArrayRepr(
"profile",
"indexed"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"profile",
"indexed"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# we'll add another index variable, also bad
nc = MockRaggedArrayRepr(
"profile",
"indexed"
)
v = nc.createVariable(
"index_var2",
"i",
("SAMPLE_DIMENSION",),
fill_value=None)
v.setncattr("instance_dimension", "INSTANCE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# ----- profile, contiguous ----- #
nc = MockRaggedArrayRepr(
"profile",
"contiguous"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"profile",
"contiguous"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# we'll add another count variable, also bad
nc = MockRaggedArrayRepr(
"profile",
"contiguous"
)
v = nc.createVariable(
"index_var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("sample_dimension", "SAMPLE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# ----- trajectory, indexed ----- #
nc = MockRaggedArrayRepr(
"trajectory",
"indexed"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"trajectory",
"indexed"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# we'll add another index variable, also bad
nc = MockRaggedArrayRepr(
"trajectory",
"indexed"
)
v = nc.createVariable(
"index_var2",
"i",
("SAMPLE_DIMENSION",),
fill_value=None)
v.setncattr("instance_dimension", "INSTANCE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# ----- trajectory, contiguous ----- #
nc = MockRaggedArrayRepr(
"trajectory",
"contiguous"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"trajectory",
"contiguous"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# we'll add another count variable, also bad
nc = MockRaggedArrayRepr(
"trajectory",
"contiguous"
)
v = nc.createVariable(
"index_var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("sample_dimension", "SAMPLE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# ----- now test compound featureType ----- #
# ----- timeSeriesProfile ----- #
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
# NOTE
# has no geophysical vars, so should (?) (will) fail
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# add a geophysical variable and test again
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1.setncattr("standard_name", "pressure")
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
        # add a third cf_role variable - this should fail
v = nc.createVariable(
"cf_role_var3",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# set the index variable to have an incorrect attr
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["station_index_variable"].instance_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# change the sample_dimension attr on the count variable, bad
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["counter_var"].sample_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# give another geophysical data variable a different dimension
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1 = nc.createVariable(
"data2",
"i",
("STATION_DIMENSION",), # bad!
fill_value=None
)
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# ----- trajectoryProfile ----- #
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
# NOTE
# has no geophysical vars, so should (?) (will) fail
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# add a geophysical variable and test again
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1.setncattr("standard_name", "pressure")
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
        # add a third cf_role variable - this should fail
v = nc.createVariable(
"cf_role_var3",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# set the index variable to have an incorrect attr
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["station_index_variable"].instance_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# change the sample_dimension attr on the count variable, bad
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["counter_var"].sample_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# give another geophysical data variable a different dimension
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1 = nc.createVariable(
"data2",
"i",
("STATION_DIMENSION",), # bad!
fill_value=None
)
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
| apache-2.0 |
wilebeast/FireFox-OS | B2G/external/wpa_supplicant_8/wpa_supplicant/examples/wpas-dbus-new-wps.py | 114 | 2221 | #!/usr/bin/python
import dbus
import sys, os
import time
import gobject
from dbus.mainloop.glib import DBusGMainLoop
WPAS_DBUS_SERVICE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_INTERFACE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_OPATH = "/fi/w1/wpa_supplicant1"
WPAS_DBUS_INTERFACES_INTERFACE = "fi.w1.wpa_supplicant1.Interface"
WPAS_DBUS_WPS_INTERFACE = "fi.w1.wpa_supplicant1.Interface.WPS"
def propertiesChanged(properties):
if properties.has_key("State"):
print "PropertiesChanged: State: %s" % (properties["State"])
def scanDone(success):
print "Scan done: success=%s" % success
def bssAdded(bss, properties):
print "BSS added: %s" % (bss)
def bssRemoved(bss):
print "BSS removed: %s" % (bss)
def wpsEvent(name, args):
print "WPS event: %s" % (name)
print args
def credentials(cred):
print "WPS credentials: %s" % (cred)
def main():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
global bus
bus = dbus.SystemBus()
wpas_obj = bus.get_object(WPAS_DBUS_SERVICE, WPAS_DBUS_OPATH)
if len(sys.argv) != 2:
print "Missing ifname argument"
os._exit(1)
wpas = dbus.Interface(wpas_obj, WPAS_DBUS_INTERFACE)
bus.add_signal_receiver(scanDone,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="ScanDone")
bus.add_signal_receiver(bssAdded,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSAdded")
bus.add_signal_receiver(bssRemoved,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSRemoved")
bus.add_signal_receiver(propertiesChanged,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="PropertiesChanged")
bus.add_signal_receiver(wpsEvent,
dbus_interface=WPAS_DBUS_WPS_INTERFACE,
signal_name="Event")
bus.add_signal_receiver(credentials,
dbus_interface=WPAS_DBUS_WPS_INTERFACE,
signal_name="Credentials")
ifname = sys.argv[1]
path = wpas.GetInterface(ifname)
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
if_obj.Set(WPAS_DBUS_WPS_INTERFACE, 'ProcessCredentials',
dbus.Boolean(1),
dbus_interface=dbus.PROPERTIES_IFACE)
wps = dbus.Interface(if_obj, WPAS_DBUS_WPS_INTERFACE)
wps.Start({'Role': 'enrollee', 'Type': 'pbc'})
gobject.MainLoop().run()
if __name__ == "__main__":
main()
| apache-2.0 |
jtyr/ansible-modules-extras | messaging/rabbitmq_queue.py | 29 | 9468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ queues
description:
    - This module uses rabbitMQ REST API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue to create
required: true
state:
description:
- Whether the queue should be present or absent
- Only present implemented atm
choices: [ "present", "absent" ]
required: false
default: present
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
default: false
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
required: false
default: "/"
durable:
description:
- whether queue is durable or not
required: false
choices: [ "yes", "no" ]
default: yes
auto_delete:
description:
            - if the queue should delete itself after all consumers have finished using it
required: false
choices: [ "yes", "no" ]
default: no
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
required: False
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
required: false
default: forever
max_length:
description:
- How many messages can the queue contain before it starts rejecting
required: false
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
required: false
default: None
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
required: false
default: None
arguments:
description:
- extra arguments for queue. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue: name=myQueue
# Create a queue on remote host
- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org
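# Create a durable queue with a message TTL and dead-lettering
# (values are illustrative, not defaults)
- rabbitmq_queue: name=myDlxQueue durable=yes message_ttl=60000 dead_letter_exchange=myDLX dead_letter_routing_key=myQueue.dead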
'''
import requests
import urllib
import json
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
name = dict(required=True, type='str'),
login_user = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str', no_log=True),
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
durable = dict(default=True, type='bool'),
auto_delete = dict(default=False, type='bool'),
message_ttl = dict(default=None, type='int'),
auto_expires = dict(default=None, type='int'),
max_length = dict(default=None, type='int'),
dead_letter_exchange = dict(default=None, type='str'),
dead_letter_routing_key = dict(default=None, type='str'),
arguments = dict(default=dict(), type='dict')
),
supports_check_mode = True
)
url = "http://%s:%s/api/queues/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
module.params['name']
)
# Check if queue already exists
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
if r.status_code==200:
queue_exists = True
response = r.json()
elif r.status_code==404:
queue_exists = False
response = r.text
else:
module.fail_json(
msg = "Invalid response from RESTAPI when trying to check if queue exists",
details = r.text
)
if module.params['state']=='present':
change_required = not queue_exists
else:
change_required = queue_exists
# Check if attributes change on existing queue
if not change_required and r.status_code==200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['auto_delete'] and
(
( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or
( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None )
) and
(
( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or
( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None )
) and
(
( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or
( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None )
) and
(
( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or
( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None )
) and
(
( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or
( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None )
)
):
module.fail_json(
msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
)
# Copy parameters to arguments as used by RabbitMQ
for k,v in {
'message_ttl': 'x-message-ttl',
'auto_expires': 'x-expires',
'max_length': 'x-max-length',
'dead_letter_exchange': 'x-dead-letter-exchange',
'dead_letter_routing_key': 'x-dead-letter-routing-key'
}.items():
if module.params[k]:
module.params['arguments'][v] = module.params[k]
# Exit if check_mode
if module.check_mode:
module.exit_json(
changed= change_required,
name = module.params['name'],
details = response,
arguments = module.params['arguments']
)
# Do changes
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
auth = (module.params['login_user'],module.params['login_password']),
headers = { "content-type": "application/json"},
data = json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['auto_delete'],
"arguments": module.params['arguments']
})
)
elif module.params['state'] == 'absent':
r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
if r.status_code == 204:
module.exit_json(
changed = True,
name = module.params['name']
)
else:
module.fail_json(
msg = "Error creating queue",
status = r.status_code,
details = r.text
)
else:
module.exit_json(
changed = False,
name = module.params['name']
)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
hujiajie/chromium-crosswalk | tools/telemetry/telemetry/internal/backends/android_command_line_backend_unittest.py | 13 | 3617 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import unittest
from telemetry import decorators
from telemetry.internal.backends import android_command_line_backend
from telemetry.testing import options_for_unittests
from devil.android import device_utils
class _MockBackendSettings(object):
pseudo_exec_name = 'chrome'
def __init__(self, path):
self._path = path
def GetCommandLineFile(self, _):
return self._path
class AndroidCommandLineBackendTest(unittest.TestCase):
def _GetDeviceForTest(self):
serial = options_for_unittests.GetCopy().device
if serial:
device = device_utils.DeviceUtils(serial)
return device
else:
devices = device_utils.DeviceUtils.HealthyDevices()
if not devices:
return None
return devices[0]
def testQuoteIfNeededNoEquals(self):
string = 'value'
self.assertEqual(string,
android_command_line_backend._QuoteIfNeeded(string))
def testQuoteIfNeededNoSpaces(self):
string = 'key=valueA'
self.assertEqual(string,
android_command_line_backend._QuoteIfNeeded(string))
def testQuoteIfNeededAlreadyQuoted(self):
string = "key='valueA valueB'"
self.assertEqual(string,
android_command_line_backend._QuoteIfNeeded(string))
def testQuoteIfNeeded(self):
string = 'key=valueA valueB'
expected_output = "key='valueA valueB'"
self.assertEqual(expected_output,
android_command_line_backend._QuoteIfNeeded(string))
@decorators.Enabled('android')
def testSetUpCommandLineFlagsCmdRestored(self):
"""Test that a previous command line file is restored.
Requires a device connected to the host.
"""
device = self._GetDeviceForTest()
if not device:
logging.warning('Skip the test because we cannot find any healthy device')
return
cmd_file = '/data/local/tmp/test_cmd2'
backend_settings = _MockBackendSettings(cmd_file)
startup_args = ['--some', '--test', '--args']
try:
device.WriteFile(cmd_file, 'chrome --args --to --save')
self.assertEqual('chrome --args --to --save',
device.ReadFile(cmd_file).strip())
with android_command_line_backend.SetUpCommandLineFlags(
device, backend_settings, startup_args):
self.assertEqual('chrome --some --test --args',
device.ReadFile(cmd_file).strip())
self.assertEqual('chrome --args --to --save',
device.ReadFile(cmd_file).strip())
finally:
device.RunShellCommand(['rm', '-f', cmd_file], check_return=True)
@decorators.Enabled('android')
def testSetUpCommandLineFlagsCmdRemoved(self):
"""Test that the command line file is removed if it did not exist before.
Requires a device connected to the host.
"""
device = self._GetDeviceForTest()
if not device:
logging.warning('Skip the test because we cannot find any healthy device')
return
cmd_file = '/data/local/tmp/test_cmd'
backend_settings = _MockBackendSettings(cmd_file)
startup_args = ['--some', '--test', '--args']
device.RunShellCommand(['rm', '-f', cmd_file], check_return=True)
with android_command_line_backend.SetUpCommandLineFlags(
device, backend_settings, startup_args):
self.assertEqual('chrome --some --test --args',
device.ReadFile(cmd_file).strip())
self.assertFalse(device.FileExists(cmd_file))
| bsd-3-clause |
Cinntax/home-assistant | homeassistant/components/lcn/scene.py | 7 | 2167 | """Support for LCN scenes."""
import pypck
from homeassistant.components.scene import Scene
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import (
CONF_CONNECTIONS,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_SCENE,
CONF_TRANSITION,
DATA_LCN,
OUTPUT_PORTS,
)
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN scene platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
devices.append(LcnScene(config, address_connection))
async_add_entities(devices)
class LcnScene(LcnDevice, Scene):
"""Representation of a LCN scene."""
def __init__(self, config, address_connection):
"""Initialize the LCN scene."""
super().__init__(config, address_connection)
self.register_id = config[CONF_REGISTER]
self.scene_id = config[CONF_SCENE]
self.output_ports = []
self.relay_ports = []
for port in config[CONF_OUTPUTS]:
if port in OUTPUT_PORTS:
self.output_ports.append(pypck.lcn_defs.OutputPort[port])
else: # in RELAY_PORTS
self.relay_ports.append(pypck.lcn_defs.RelayPort[port])
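# Convert the optional transition time into the ramp value pypck expects; None leaves the ramp disabled.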
if config[CONF_TRANSITION] is None:
self.transition = None
else:
self.transition = pypck.lcn_defs.time_to_ramp_value(config[CONF_TRANSITION])
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
async def async_activate(self):
"""Activate scene."""
self.address_connection.activate_scene(
self.register_id,
self.scene_id,
self.output_ports,
self.relay_ports,
self.transition,
)
| apache-2.0 |
Ultimaker/Uranium | plugins/FileHandlers/STLWriter/STLWriter.py | 1 | 5501 | # Copyright (c) 2016 Ultimaker B.V.
# Copyright (c) 2013 David Braam
# Uranium is released under the terms of the LGPLv3 or higher.
import struct
import time
from UM.Logger import Logger
from UM.Mesh.MeshWriter import MeshWriter
from UM.i18n import i18nCatalog
catalog = i18nCatalog("uranium")
class STLWriter(MeshWriter):
def write(self, stream, nodes, mode = MeshWriter.OutputMode.TextMode):
"""Write the specified sequence of nodes to a stream in the STL format.
:param stream: The output stream to write to.
:param nodes: A sequence of scene nodes to write to the output stream.
:param mode: The output mode to use for writing scene nodes. Text mode
causes the writer to write in STL's ASCII format. Binary mode causes the
writer to write in STL's binary format. Any other mode is invalid.
"""
try:
MeshWriter._meshNodes(nodes).__next__()
except StopIteration:
Logger.log("e", "There is no mesh to write.")
self.setInformation(catalog.i18nc("@error:no mesh", "There is no mesh to write."))
return False # Don't try to write a file if there is no mesh.
if mode == MeshWriter.OutputMode.TextMode:
self._writeAscii(stream, MeshWriter._meshNodes(nodes))
elif mode == MeshWriter.OutputMode.BinaryMode:
self._writeBinary(stream, MeshWriter._meshNodes(nodes))
else:
Logger.log("e", "Unsupported output mode writing STL to stream")
self.setInformation(catalog.i18nc("@error:not supported", "Unsupported output mode writing STL to stream."))
return False
return True
def _writeAscii(self, stream, nodes):
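# Each triangle is written as an ASCII 'facet' block with a placeholder (0, 0, 0) normal; vertex axes are remapped as (x, -z, y) for the STL convention.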
name = "Uranium STLWriter {0}".format(time.strftime("%a %d %b %Y %H:%M:%S"))
stream.write("solid {0}\n".format(name))
for node in nodes:
mesh_data = node.getMeshData().getTransformed(node.getWorldTransformation())
verts = mesh_data.getVertices()
if verts is None:
continue # No mesh data, nothing to do.
if mesh_data.hasIndices():
for face in mesh_data.getIndices():
stream.write("facet normal 0.0 0.0 0.0\n")
stream.write(" outer loop\n")
v1 = verts[face[0]]
v2 = verts[face[1]]
v3 = verts[face[2]]
stream.write(" vertex {0} {1} {2}\n".format(v1[0], -v1[2], v1[1]))
stream.write(" vertex {0} {1} {2}\n".format(v2[0], -v2[2], v2[1]))
stream.write(" vertex {0} {1} {2}\n".format(v3[0], -v3[2], v3[1]))
stream.write(" endloop\n")
stream.write("endfacet\n")
else:
num_verts = mesh_data.getVertexCount()
for index in range(0, num_verts - 2, 3):
stream.write("facet normal 0.0 0.0 0.0\n")
stream.write(" outer loop\n")
v1 = verts[index]
v2 = verts[index + 1]
v3 = verts[index + 2]
stream.write(" vertex {0} {1} {2}\n".format(v1[0], -v1[2], v1[1]))
stream.write(" vertex {0} {1} {2}\n".format(v2[0], -v2[2], v2[1]))
stream.write(" vertex {0} {1} {2}\n".format(v3[0], -v3[2], v3[1]))
stream.write(" endloop\n")
stream.write("endfacet\n")
stream.write("endsolid {0}\n".format(name))
def _writeBinary(self, stream, nodes):
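# Binary STL layout: an 80-byte header, a little-endian uint32 triangle count, then 50 bytes per triangle (12-byte normal, three 12-byte vertices, 2-byte attribute count).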
stream.write("Uranium STLWriter {0}".format(time.strftime("%a %d %b %Y %H:%M:%S")).encode().ljust(80, b"\000"))
face_count = 0
nodes = list(nodes)
for node in nodes:
if node.getMeshData().hasIndices():
face_count += node.getMeshData().getFaceCount()
else:
face_count += node.getMeshData().getVertexCount() / 3
stream.write(struct.pack("<I", int(face_count))) #Write number of faces to STL
for node in nodes:
mesh_data = node.getMeshData().getTransformed(node.getWorldTransformation())
if mesh_data.hasIndices():
verts = mesh_data.getVertices()
for face in mesh_data.getIndices():
v1 = verts[face[0]]
v2 = verts[face[1]]
v3 = verts[face[2]]
stream.write(struct.pack("<fff", 0.0, 0.0, 0.0))
stream.write(struct.pack("<fff", v1[0], -v1[2], v1[1]))
stream.write(struct.pack("<fff", v2[0], -v2[2], v2[1]))
stream.write(struct.pack("<fff", v3[0], -v3[2], v3[1]))
stream.write(struct.pack("<H", 0))
else:
num_verts = mesh_data.getVertexCount()
verts = mesh_data.getVertices()
for index in range(0, num_verts - 1, 3):
v1 = verts[index]
v2 = verts[index + 1]
v3 = verts[index + 2]
stream.write(struct.pack("<fff", 0.0, 0.0, 0.0))
stream.write(struct.pack("<fff", v1[0], -v1[2], v1[1]))
stream.write(struct.pack("<fff", v2[0], -v2[2], v2[1]))
stream.write(struct.pack("<fff", v3[0], -v3[2], v3[1]))
stream.write(struct.pack("<H", 0)) | lgpl-3.0 |
kevinwilde/WildeBot | src/mybot/Lib/site-packages/requests/packages/chardet/chardetect.py | 1786 | 2504 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| mit |
dati91/servo | tests/wpt/web-platform-tests/xhr/resources/access-control-basic-preflight-cache.py | 46 | 1709 | def main(request, response):
def fail(message):
response.content = "FAIL " + request.method + ": " + str(message)
response.status = 400
def getState(token):
server_state = request.server.stash.take(token)
if not server_state:
return "Uninitialized"
return server_state
def setState(state, token):
request.server.stash.put(token, state)
response.headers.set("Access-Control-Allow-Origin", request.headers.get("origin"))
response.headers.set("Access-Control-Allow-Credentials", "true")
token = request.GET.first("token", None)
state = getState(token)
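# Per-token state machine: Uninitialized -> OPTIONSSent -> FirstPUTSent. If a second OPTIONS arrives before the second PUT, the preflight response was not cached and the test fails.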
if state == "Uninitialized":
if request.method == "OPTIONS":
response.headers.set("Access-Control-Allow-Methods", "PUT")
response.headers.set("Access-Control-Max-Age", 10)
setState("OPTIONSSent", token)
else:
fail(state)
elif state == "OPTIONSSent":
if request.method == "PUT":
response.content = "PASS: First PUT request."
setState("FirstPUTSent", token)
else:
fail(state)
elif state == "FirstPUTSent":
if request.method == "PUT":
response.content = "PASS: Second PUT request. Preflight worked."
elif request.method == "OPTIONS":
response.headers.set("Access-Control-Allow-Methods", "PUT")
setState("FAILSecondOPTIONSSent", token)
else:
fail(state)
elif state == "FAILSecondOPTIONSSent":
if request.method == "PUT":
fail("Second OPTIONS request was sent. Preflight failed.")
else:
fail(state)
else:
fail(state)
| mpl-2.0 |
magnushiie/geopy | geopy/format.py | 24 | 3013 | """
Formatting...
"""
from geopy import units
from geopy.compat import py3k
if py3k:
unichr = chr # pylint: disable=W0622
# Unicode characters for symbols that appear in coordinate strings.
DEGREE = unichr(176)
PRIME = unichr(8242)
DOUBLE_PRIME = unichr(8243)
ASCII_DEGREE = ''
ASCII_PRIME = "'"
ASCII_DOUBLE_PRIME = '"'
LATIN1_DEGREE = chr(176)
HTML_DEGREE = '°'
HTML_PRIME = '′'
HTML_DOUBLE_PRIME = '″'
XML_DECIMAL_DEGREE = '°'
XML_DECIMAL_PRIME = '′'
XML_DECIMAL_DOUBLE_PRIME = '″'
XML_HEX_DEGREE = '&#xB0;'
XML_HEX_PRIME = '&#x2032;'
XML_HEX_DOUBLE_PRIME = '&#x2033;'
ABBR_DEGREE = 'deg'
ABBR_ARCMIN = 'arcmin'
ABBR_ARCSEC = 'arcsec'
DEGREES_FORMAT = (
"%(degrees)d%(deg)s %(minutes)d%(arcmin)s %(seconds)g%(arcsec)s"
)
UNICODE_SYMBOLS = {
'deg': DEGREE,
'arcmin': PRIME,
'arcsec': DOUBLE_PRIME
}
ASCII_SYMBOLS = {
'deg': ASCII_DEGREE,
'arcmin': ASCII_PRIME,
'arcsec': ASCII_DOUBLE_PRIME
}
LATIN1_SYMBOLS = {
'deg': LATIN1_DEGREE,
'arcmin': ASCII_PRIME,
'arcsec': ASCII_DOUBLE_PRIME
}
HTML_SYMBOLS = {
'deg': HTML_DEGREE,
'arcmin': HTML_PRIME,
'arcsec': HTML_DOUBLE_PRIME
}
XML_SYMBOLS = {
'deg': XML_DECIMAL_DEGREE,
'arcmin': XML_DECIMAL_PRIME,
'arcsec': XML_DECIMAL_DOUBLE_PRIME
}
ABBR_SYMBOLS = {
'deg': ABBR_DEGREE,
'arcmin': ABBR_ARCMIN,
'arcsec': ABBR_ARCSEC
}
def format_degrees(degrees, fmt=DEGREES_FORMAT, symbols=None):
"""
Convert decimal degrees into a degrees/arcminutes/arcseconds string using the given format and symbol set.
"""
symbols = symbols or ASCII_SYMBOLS
arcminutes = units.arcminutes(degrees=degrees - int(degrees))
arcseconds = units.arcseconds(arcminutes=arcminutes - int(arcminutes))
format_dict = dict(
symbols,
degrees=degrees,
minutes=abs(arcminutes),
seconds=abs(arcseconds)
)
return fmt % format_dict
DISTANCE_FORMAT = "%(magnitude)s%(unit)s"
DISTANCE_UNITS = {
'km': lambda d: d,
'm': lambda d: units.meters(kilometers=d),
'mi': lambda d: units.miles(kilometers=d),
'ft': lambda d: units.feet(kilometers=d),
'nm': lambda d: units.nautical(kilometers=d),
'nmi': lambda d: units.nautical(kilometers=d)
}
def format_distance(kilometers, fmt=DISTANCE_FORMAT, unit='km'):
"""
Convert a distance in kilometers into a formatted string in the requested unit.
"""
magnitude = DISTANCE_UNITS[unit](kilometers)
return fmt % {'magnitude': magnitude, 'unit': unit}
_DIRECTIONS = [
('north', 'N'),
('north by east', 'NbE'),
('north-northeast', 'NNE'),
('northeast by north', 'NEbN'),
('northeast', 'NE'),
('northeast by east', 'NEbE'),
('east-northeast', 'ENE'),
('east by north', 'EbN'),
('east', 'E'),
('east by south', 'EbS'),
('east-southeast', 'ESE'),
('southeast by east', 'SEbE'),
('southeast', 'SE'),
('southeast by south', 'SEbS'),
]
DIRECTIONS, DIRECTIONS_ABBR = zip(*_DIRECTIONS)
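# Successive compass points are 11.25 degrees apart, so index n maps to a heading of n * 11.25 degrees.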
ANGLE_DIRECTIONS = {
n * 11.25: d
for n, d
in enumerate(DIRECTIONS)
}
ANGLE_DIRECTIONS_ABBR = {
n * 11.25: d
for n, d
in enumerate(DIRECTIONS_ABBR)
}
| mit |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/numpy/core/tests/test_errstate.py | 23 | 1783 | # The following exec statement (or something like it) is needed to
# prevent SyntaxError on Python < 2.5. Even though this is a test,
# SyntaxErrors are not acceptable; on Debian systems, they block
# byte-compilation during install and thus cause the package to fail
# to install.
import sys
if sys.version_info[:2] >= (2, 5):
exec """
from __future__ import with_statement
from numpy.core import *
from numpy.random import rand, randint
from numpy.testing import *
class TestErrstate(TestCase):
def test_invalid(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(invalid='ignore'):
sqrt(a)
# While this should fail!
try:
sqrt(a)
except FloatingPointError:
pass
else:
self.fail("Did not raise an invalid error")
def test_divide(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(divide='ignore'):
a // 0
# While this should fail!
try:
a // 0
except FloatingPointError:
pass
else:
self.fail("Did not raise divide by zero error")
def test_errcall(self):
def foo(*args):
print(args)
olderrcall = geterrcall()
with errstate(call=foo):
assert(geterrcall() is foo), 'call is not foo'
with errstate(call=None):
assert(geterrcall() is None), 'call is not None'
assert(geterrcall() is olderrcall), 'call is not olderrcall'
"""
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
boone/ansible-modules-core | cloud/rackspace/rax_scaling_policy.py | 157 | 9070 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_scaling_policy
short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
description:
- Manipulate Rackspace Cloud Autoscale Scaling Policy
version_added: 1.7
options:
at:
description:
- The UTC time when this policy will be executed. The time must be
formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
C(2013-05-19T08:07:08Z)
change:
description:
- The change, either as a number of servers or as a percentage, to make
in the scaling group. If this is a percentage, you must set
I(is_percent) to C(true) also.
cron:
description:
- The time when the policy will be executed, as a cron entry. For
example, C(1 0 * * *) runs the policy at 00:01 every day.
cooldown:
description:
- The period of time, in seconds, that must pass before any scaling can
occur after the previous scaling. Must be an integer between 0 and
86400 (24 hrs).
desired_capacity:
description:
- The desired server capacity of the scaling the group; that is, how
many servers should be in the scaling group.
is_percent:
description:
- Whether the value in I(change) is a percent value
default: false
name:
description:
- Name to give the policy
required: true
policy_type:
description:
- The type of policy that will be executed for the current release.
choices:
- webhook
- schedule
required: true
scaling_group:
description:
- Name of the scaling group that this policy will be added to
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: false
connection: local
tasks:
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
at: '2013-05-19T08:07:08Z'
change: 25
cooldown: 300
is_percent: true
name: ASG Test Policy - at
policy_type: schedule
scaling_group: ASG Test
register: asps_at
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
cron: '1 0 * * *'
change: 25
cooldown: 300
is_percent: true
name: ASG Test Policy - cron
policy_type: schedule
scaling_group: ASG Test
register: asp_cron
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
cooldown: 300
desired_capacity: 5
name: ASG Test Policy - webhook
policy_type: webhook
scaling_group: ASG Test
register: asp_webhook
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
desired_capacity=0, is_percent=False, name=None,
policy_type=None, scaling_group=None, state='present'):
changed = False
au = pyrax.autoscale
if not au:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
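# scaling_group may be given as a UUID or as a name; try parsing a UUID first and fall back to a lookup by name.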
try:
UUID(scaling_group)
except ValueError:
try:
sg = au.find(name=scaling_group)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
try:
sg = au.get(scaling_group)
except Exception, e:
module.fail_json(msg='%s' % e.message)
if state == 'present':
policies = filter(lambda p: name == p.name, sg.list_policies())
if len(policies) > 1:
module.fail_json(msg='No unique policy match found by name')
if at:
args = dict(at=at)
elif cron:
args = dict(cron=cron)
else:
args = None
if not policies:
try:
policy = sg.add_policy(name, policy_type=policy_type,
cooldown=cooldown, change=change,
is_percent=is_percent,
desired_capacity=desired_capacity,
args=args)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
policy = policies[0]
kwargs = {}
if policy_type != policy.type:
kwargs['policy_type'] = policy_type
if cooldown != policy.cooldown:
kwargs['cooldown'] = cooldown
if hasattr(policy, 'change') and change != policy.change:
kwargs['change'] = change
if hasattr(policy, 'changePercent') and is_percent is False:
kwargs['change'] = change
kwargs['is_percent'] = False
elif hasattr(policy, 'change') and is_percent is True:
kwargs['change'] = change
kwargs['is_percent'] = True
if hasattr(policy, 'desiredCapacity') and change:
kwargs['change'] = change
elif ((hasattr(policy, 'change') or
hasattr(policy, 'changePercent')) and desired_capacity):
kwargs['desired_capacity'] = desired_capacity
if hasattr(policy, 'args') and args != policy.args:
kwargs['args'] = args
if kwargs:
policy.update(**kwargs)
changed = True
policy.get()
module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
else:
try:
policies = filter(lambda p: name == p.name, sg.list_policies())
if len(policies) > 1:
module.fail_json(msg='No unique policy match found by name')
elif not policies:
policy = {}
else:
policy.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
at=dict(),
change=dict(type='int'),
cron=dict(),
cooldown=dict(type='int', default=300),
desired_capacity=dict(type='int'),
is_percent=dict(type='bool', default=False),
name=dict(required=True),
policy_type=dict(required=True, choices=['webhook', 'schedule']),
scaling_group=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[
['cron', 'at'],
['change', 'desired_capacity'],
]
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
at = module.params.get('at')
change = module.params.get('change')
cron = module.params.get('cron')
cooldown = module.params.get('cooldown')
desired_capacity = module.params.get('desired_capacity')
is_percent = module.params.get('is_percent')
name = module.params.get('name')
policy_type = module.params.get('policy_type')
scaling_group = module.params.get('scaling_group')
state = module.params.get('state')
if (at or cron) and policy_type == 'webhook':
module.fail_json(msg='policy_type=schedule is required for a time '
'based policy')
setup_rax_module(module, pyrax)
rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
desired_capacity=desired_capacity, is_percent=is_percent,
name=name, policy_type=policy_type, scaling_group=scaling_group,
state=state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
| gpl-3.0 |
uni-peter-zheng/tp-libvirt | libvirt/tests/src/virsh_cmd/host/virsh_capabilities.py | 7 | 5687 | import logging
import re
from autotest.client.shared import utils, error
from autotest.client import os_dep
from virttest import libvirt_vm, virsh, utils_libvirtd, utils_misc
from virttest.libvirt_xml import capability_xml
def run(test, params, env):
"""
Test the command virsh capabilities
(1) Call virsh capabilities
(2) Call virsh capabilities with an unexpected option
(3) Call virsh capabilities with libvirtd service stop
"""
def compare_capabilities_xml(source):
cap_xml = capability_xml.CapabilityXML()
cap_xml.xml = source
# Check that host has a non-empty UUID tag.
xml_uuid = cap_xml.uuid
logging.debug("Host UUID (capabilities_xml): %s" % xml_uuid)
if xml_uuid == "":
raise error.TestFail("The host uuid in capabilities_xml is none!")
# Check the host arch.
xml_arch = cap_xml.arch
logging.debug("Host arch (capabilities_xml): %s", xml_arch)
exp_arch = utils.run("arch", ignore_status=True).stdout.strip()
if cmp(xml_arch, exp_arch) != 0:
raise error.TestFail("The host arch in capabilities_xml is expected"
" to be %s, but get %s" % (exp_arch, xml_arch))
# Check the host cpu count.
xml_cpu_count = cap_xml.cpu_count
logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
cmd = "grep processor /proc/cpuinfo | wc -l"
exp_cpu_count = int(utils.run(cmd, ignore_status=True).stdout.strip())
if xml_cpu_count != exp_cpu_count:
raise error.TestFail("Host cpus count is expected to be %s, but get "
"%s" % (exp_cpu_count, xml_cpu_count))
# Check the arch of guest supported.
guest_capa = cap_xml.get_guest_capabilities()
logging.debug(guest_capa)
try:
img = utils_misc.find_command("qemu-kvm")
except ValueError:
raise error.TestNAError("Cannot find qemu-kvm")
if re.search("ppc", utils.run("arch").stdout):
cmd = img + " --cpu ? | grep ppc"
else:
cmd = img + " --cpu ? | grep qemu"
cmd_result = utils.run(cmd, ignore_status=True)
for guest in cap_xml.xmltreefile.findall('guest'):
guest_wordsize = guest.find('arch').find('wordsize').text
logging.debug("Arch of guest supported (capabilities_xml):%s",
guest_wordsize)
if not re.search(guest_wordsize, cmd_result.stdout.strip()):
raise error.TestFail("The capabilities_xml gives an extra arch "
"of guest to support!")
# Check the type of hypervisor.
first_guest = cap_xml.xmltreefile.findall('guest')[0]
first_domain = first_guest.find('arch').findall('domain')[0]
guest_domain_type = first_domain.get('type')
logging.debug("Hypervisor (capabilities_xml):%s", guest_domain_type)
cmd_result = utils.run("virsh uri", ignore_status=True)
if not re.search(guest_domain_type, cmd_result.stdout.strip()):
raise error.TestFail("The capabilities_xml gives an different "
"hypervisor")
# check power management support.
try:
pm_cmd = os_dep.command('pm-is-supported')
pm_cap_map = {'suspend': 'suspend_mem',
'hibernate': 'suspend_disk',
'suspend-hybrid': 'suspend_hybrid',
}
exp_pms = []
for opt in pm_cap_map:
cmd = '%s --%s' % (pm_cmd, opt)
res = utils.run(cmd, ignore_status=True)
if res.exit_status == 0:
exp_pms.append(pm_cap_map[opt])
pms = cap_xml.power_management_list
if set(exp_pms) != set(pms):
raise error.TestFail("Expected supported PMs are %s, got %s "
"instead." % (exp_pms, pms))
except ValueError:
logging.debug('Power management checking is skipped, since command '
'pm-is-supported is not found.')
connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
"default"))
# Prepare libvirtd service
if "libvirtd" in params:
libvirtd = params.get("libvirtd")
if libvirtd == "off":
utils_libvirtd.libvirtd_stop()
# Run test case
option = params.get("virsh_cap_options")
try:
output = virsh.capabilities(option, uri=connect_uri,
ignore_status=False, debug=True)
status = 0 # good
except error.CmdError:
status = 1 # bad
output = ''
# Recover libvirtd service start
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
# Check status_error
status_error = params.get("status_error")
if status_error == "yes":
if status == 0:
if libvirtd == "off":
raise error.TestFail("Command 'virsh capabilities' succeeded "
"with libvirtd service stopped, incorrect")
else:
raise error.TestFail("Command 'virsh capabilities %s' succeeded "
"(incorrect command)" % option)
elif status_error == "no":
compare_capabilities_xml(output)
if status != 0:
raise error.TestFail("Command 'virsh capabilities %s' failed "
"(correct command)" % option)
| gpl-2.0 |
CasparLi/calibre | src/calibre/ebooks/pdf/render/engine.py | 10 | 14304 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, traceback, math
from collections import namedtuple
from functools import wraps, partial
from future_builtins import map, zip
from PyQt5.Qt import (QPaintEngine, QPaintDevice, Qt, QTransform, QBrush)
from calibre.constants import plugins
from calibre.ebooks.pdf.render.serialize import (PDFStream, Path)
from calibre.ebooks.pdf.render.common import inch, A4, fmtnum
from calibre.ebooks.pdf.render.graphics import convert_path, Graphics
from calibre.utils.fonts.sfnt.container import Sfnt, UnsupportedFont
from calibre.utils.fonts.sfnt.metrics import FontMetrics
Point = namedtuple('Point', 'x y')
ColorState = namedtuple('ColorState', 'color opacity do')
GlyphInfo = namedtuple('GlyphInfo', 'name size stretch positions indices')
def repr_transform(t):
vals = map(fmtnum, (t.m11(), t.m12(), t.m21(), t.m22(), t.dx(), t.dy()))
return '[%s]'%' '.join(vals)
def store_error(func):
@wraps(func)
def errh(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except:
self.errors_occurred = True
self.errors(traceback.format_exc())
return errh
class Font(FontMetrics):
def __init__(self, sfnt):
FontMetrics.__init__(self, sfnt)
self.glyph_map = {}
class PdfEngine(QPaintEngine):
FEATURES = QPaintEngine.AllFeatures & ~(
QPaintEngine.PorterDuff | QPaintEngine.PerspectiveTransform |
QPaintEngine.ObjectBoundingModeGradients |
QPaintEngine.RadialGradientFill |
QPaintEngine.ConicalGradientFill
)
def __init__(self, file_object, page_width, page_height, left_margin,
top_margin, right_margin, bottom_margin, width, height,
errors=print, debug=print, compress=True,
mark_links=False):
QPaintEngine.__init__(self, self.FEATURES)
self.file_object = file_object
self.compress, self.mark_links = compress, mark_links
self.page_height, self.page_width = page_height, page_width
self.left_margin, self.top_margin = left_margin, top_margin
self.right_margin, self.bottom_margin = right_margin, bottom_margin
self.pixel_width, self.pixel_height = width, height
# Setup a co-ordinate transform that allows us to use co-ords
# from Qt's pixel based co-ordinate system with its origin at the top
# left corner. PDF's co-ordinate system is based on pts and has its
# origin in the bottom left corner. We also have to implement the page
# margins. Therefore, we need to translate, scale and reflect about the
# x-axis.
dy = self.page_height - self.top_margin
dx = self.left_margin
sx = (self.page_width - self.left_margin -
self.right_margin) / self.pixel_width
sy = (self.page_height - self.top_margin -
self.bottom_margin) / self.pixel_height
self.pdf_system = QTransform(sx, 0, 0, -sy, dx, dy)
self.graphics = Graphics(self.pixel_width, self.pixel_height)
self.errors_occurred = False
self.errors, self.debug = errors, debug
self.fonts = {}
self.current_page_num = 1
self.current_page_inited = False
self.qt_hack, err = plugins['qt_hack']
if err:
raise RuntimeError('Failed to load qt_hack with err: %s'%err)
def apply_graphics_state(self):
self.graphics(self.pdf_system, self.painter())
def resolve_fill(self, rect):
self.graphics.resolve_fill(rect, self.pdf_system,
self.painter().transform())
@property
def do_fill(self):
return self.graphics.current_state.do_fill
@property
def do_stroke(self):
return self.graphics.current_state.do_stroke
def init_page(self):
self.pdf.transform(self.pdf_system)
self.pdf.apply_fill(color=(1, 1, 1)) # QPainter has a default background brush of white
self.graphics.reset()
self.pdf.save_stack()
self.current_page_inited = True
def begin(self, device):
if not hasattr(self, 'pdf'):
try:
self.pdf = PDFStream(self.file_object, (self.page_width,
self.page_height), compress=self.compress,
mark_links=self.mark_links,
debug=self.debug)
self.graphics.begin(self.pdf)
except:
self.errors(traceback.format_exc())
self.errors_occurred = True
return False
return True
def end_page(self):
if self.current_page_inited:
self.pdf.restore_stack()
self.pdf.end_page()
self.current_page_inited = False
self.current_page_num += 1
def end(self):
try:
self.end_page()
self.pdf.end()
except:
self.errors(traceback.format_exc())
self.errors_occurred = True
return False
finally:
self.pdf = self.file_object = None
return True
def type(self):
return QPaintEngine.Pdf
def add_image(self, img, cache_key):
if img.isNull():
return
return self.pdf.add_image(img, cache_key)
@store_error
def drawTiledPixmap(self, rect, pixmap, point):
self.apply_graphics_state()
brush = QBrush(pixmap)
bl = rect.topLeft()
color, opacity, pattern, do_fill = self.graphics.convert_brush(
brush, bl-point, 1.0, self.pdf_system,
self.painter().transform())
self.pdf.save_stack()
self.pdf.apply_fill(color, pattern)
self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
stroke=False, fill=True)
self.pdf.restore_stack()
@store_error
def drawPixmap(self, rect, pixmap, source_rect):
self.apply_graphics_state()
source_rect = source_rect.toRect()
pixmap = (pixmap if source_rect == pixmap.rect() else
pixmap.copy(source_rect))
image = pixmap.toImage()
ref = self.add_image(image, pixmap.cacheKey())
if ref is not None:
self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
rect.height(), ref)
@store_error
def drawImage(self, rect, image, source_rect, flags=Qt.AutoColor):
self.apply_graphics_state()
source_rect = source_rect.toRect()
image = (image if source_rect == image.rect() else
image.copy(source_rect))
ref = self.add_image(image, image.cacheKey())
if ref is not None:
self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
rect.height(), ref)
@store_error
def updateState(self, state):
self.graphics.update_state(state, self.painter())
@store_error
def drawPath(self, path):
self.apply_graphics_state()
p = convert_path(path)
fill_rule = {Qt.OddEvenFill:'evenodd',
Qt.WindingFill:'winding'}[path.fillRule()]
self.pdf.draw_path(p, stroke=self.do_stroke,
fill=self.do_fill, fill_rule=fill_rule)
@store_error
def drawPoints(self, points):
self.apply_graphics_state()
p = Path()
for point in points:
p.move_to(point.x(), point.y())
p.line_to(point.x(), point.y() + 0.001)
self.pdf.draw_path(p, stroke=self.do_stroke, fill=False)
@store_error
def drawRects(self, rects):
self.apply_graphics_state()
with self.graphics:
for rect in rects:
self.resolve_fill(rect)
bl = rect.topLeft()
self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
stroke=self.do_stroke, fill=self.do_fill)
def create_sfnt(self, text_item):
get_table = partial(self.qt_hack.get_sfnt_table, text_item)
try:
ans = Font(Sfnt(get_table))
except UnsupportedFont as e:
raise UnsupportedFont('The font %s is not a valid sfnt. Error: %s'%(
text_item.font().family(), e))
glyph_map = self.qt_hack.get_glyph_map(text_item)
gm = {}
for uc, glyph_id in enumerate(glyph_map):
if glyph_id not in gm:
gm[glyph_id] = unichr(uc)
ans.full_glyph_map = gm
return ans
@store_error
def drawTextItem(self, point, text_item):
# return super(PdfEngine, self).drawTextItem(point, text_item)
self.apply_graphics_state()
gi = GlyphInfo(*self.qt_hack.get_glyphs(point, text_item))
if not gi.indices:
return
name = hash(gi.name)
if name not in self.fonts:
try:
self.fonts[name] = self.create_sfnt(text_item)
except UnsupportedFont:
return super(PdfEngine, self).drawTextItem(point, text_item)
metrics = self.fonts[name]
for glyph_id in gi.indices:
try:
metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
except (KeyError, ValueError):
pass
glyphs = []
last_x = last_y = 0
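# Glyphs are emitted as (dx, dy) deltas from the previous glyph; the y delta is negated to match the flipped y axis in the text matrix below.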
for glyph_index, (x, y) in zip(gi.indices, gi.positions):
glyphs.append((x-last_x, last_y - y, glyph_index))
last_x, last_y = x, y
self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
glyphs)
@store_error
def drawPolygon(self, points, mode):
self.apply_graphics_state()
if not points:
return
p = Path()
p.move_to(points[0].x(), points[0].y())
for point in points[1:]:
p.line_to(point.x(), point.y())
p.close()
fill_rule = {self.OddEvenMode:'evenodd',
self.WindingMode:'winding'}.get(mode, 'evenodd')
self.pdf.draw_path(p, stroke=True, fill_rule=fill_rule,
fill=(mode in (self.OddEvenMode, self.WindingMode, self.ConvexMode)))
def set_metadata(self, *args, **kwargs):
self.pdf.set_metadata(*args, **kwargs)
def add_outline(self, toc):
self.pdf.links.add_outline(toc)
def add_links(self, current_item, start_page, links, anchors):
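# Map anchor positions and link rectangles from Qt's pixel coordinates into PDF points using the page transform.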
for pos in anchors.itervalues():
pos['left'], pos['top'] = self.pdf_system.map(pos['left'], pos['top'])
for link in links:
pos = link[1]
llx = pos['left']
lly = pos['top'] + pos['height']
urx = pos['left'] + pos['width']
ury = pos['top']
llx, lly = self.pdf_system.map(llx, lly)
urx, ury = self.pdf_system.map(urx, ury)
link[1] = pos['column'] + start_page
link.append((llx, lly, urx, ury))
self.pdf.links.add(current_item, start_page, links, anchors)
class PdfDevice(QPaintDevice): # {{{
def __init__(self, file_object, page_size=A4, left_margin=inch,
top_margin=inch, right_margin=inch, bottom_margin=inch,
xdpi=1200, ydpi=1200, errors=print, debug=print,
compress=True, mark_links=False):
QPaintDevice.__init__(self)
self.xdpi, self.ydpi = xdpi, ydpi
self.page_width, self.page_height = page_size
self.body_width = self.page_width - left_margin - right_margin
self.body_height = self.page_height - top_margin - bottom_margin
self.left_margin, self.right_margin = left_margin, right_margin
self.top_margin, self.bottom_margin = top_margin, bottom_margin
self.engine = PdfEngine(file_object, self.page_width, self.page_height,
left_margin, top_margin, right_margin,
bottom_margin, self.width(), self.height(),
errors=errors, debug=debug, compress=compress,
mark_links=mark_links)
self.add_outline = self.engine.add_outline
self.add_links = self.engine.add_links
def paintEngine(self):
return self.engine
def metric(self, m):
if m in (self.PdmDpiX, self.PdmPhysicalDpiX):
return self.xdpi
if m in (self.PdmDpiY, self.PdmPhysicalDpiY):
return self.ydpi
if m == self.PdmDepth:
return 32
if m == self.PdmNumColors:
return sys.maxint
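# 25.4 mm / 72 pt = 0.35277777777778, i.e. millimetres per point; body dimensions are stored in points.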
if m == self.PdmWidthMM:
return int(round(self.body_width * 0.35277777777778))
if m == self.PdmHeightMM:
return int(round(self.body_height * 0.35277777777778))
if m == self.PdmWidth:
return int(round(self.body_width * self.xdpi / 72.0))
if m == self.PdmHeight:
return int(round(self.body_height * self.ydpi / 72.0))
return 0
def end_page(self, *args, **kwargs):
self.engine.end_page(*args, **kwargs)
def init_page(self):
self.engine.init_page()
@property
def full_page_rect(self):
page_width = int(math.ceil(self.page_width * self.xdpi / 72.0))
lm = int(math.ceil(self.left_margin * self.xdpi / 72.0))
page_height = int(math.ceil(self.page_height * self.ydpi / 72.0))
tm = int(math.ceil(self.top_margin * self.ydpi / 72.0))
return (-lm, -tm, page_width+1, page_height+1)
@property
def current_page_num(self):
return self.engine.current_page_num
@property
def errors_occurred(self):
return self.engine.errors_occurred
def to_px(self, pt, vertical=True):
return pt * (self.height()/self.page_height if vertical else
self.width()/self.page_width)
def set_metadata(self, *args, **kwargs):
self.engine.set_metadata(*args, **kwargs)
# }}}
| gpl-3.0 |