# =============================================================================
# File: language/test.py
# Repo: SKA-ScienceDataProcessor/legion-sdp-clone  (license: apache-2.0)
# =============================================================================
#!/usr/bin/env python
# Copyright 2015 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse, json, multiprocessing, os, re, subprocess, sys, traceback
from collections import OrderedDict
import legion
class TestFailure(Exception):
def __init__(self, command, output):
Exception.__init__(self, command, output)
self.command = command
self.output = output
def run(filename, verbose, flags):
args = [os.path.basename(filename)] + flags + (
[] if verbose else ['-level', '5'])
proc = legion.legion(
args,
stdout = None if verbose else subprocess.PIPE,
stderr = None if verbose else subprocess.STDOUT,
cwd = os.path.dirname(os.path.abspath(filename)))
output, _ = proc.communicate()
retcode = proc.wait()
if retcode != 0:
raise TestFailure(' '.join(args), str(output))
_re_label = r'^[ \t\r]*--[ \t]+{label}:[ \t\r]*$\n((^[ \t\r]*--.*$\n)+)'
def find_labeled_prefix(filename, label):
re_label = re.compile(_re_label.format(label = label), re.MULTILINE)
with open(filename, 'rb') as f:
program_text = f.read()
match = re.search(re_label, program_text)
if match is None:
return None
match_lines = match.group(1).strip().split('\n')
match_text = '\n'.join([line.strip()[2:].strip() for line in match_lines])
return match_text
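# [Illustrative note, not part of the original file] find_labeled_prefix()
# scans a leading comment block of a test source for a labeled section.
# Assuming a test written in a language that uses "--" comments (as the
# regex above expects), a matching block would look like this:
#
#   -- fails-with:
#   -- type mismatch in assignment
#
# find_labeled_prefix(path, 'fails-with') would then return the string
# "type mismatch in assignment" with the comment markers stripped.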
def test_compile_fail(filename, verbose, flags):
expected_failure = find_labeled_prefix(filename, 'fails-with')
if expected_failure is None:
raise Exception('No fails-with declaration in compile_fail test')
try:
run(filename, False, flags)
except TestFailure as e:
failure = '\n'.join(line.strip() for line in e.output.strip().split('\n'))
if failure != expected_failure:
raise Exception('Expected failure:\n%s\n\nInstead got:\n%s' % (expected_failure, failure))
else:
raise Exception('Expected failure, but test passed')
def test_run_pass(filename, verbose, flags):
runs_with = [[]]
runs_with_text = find_labeled_prefix(filename, 'runs-with')
if runs_with_text is not None:
runs_with = json.loads(runs_with_text)
for params in runs_with:
run(filename, verbose, flags + params)
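# [Illustrative note, not part of the original file] A "runs-with" block is
# parsed as JSON and treated as a list of extra flag lists, one run per entry.
# As an assumed example, a test annotated with:
#
#   -- runs-with:
#   -- [["-ll:cpu", "2"], ["-ll:cpu", "4"]]
#
# would be executed twice, once with each flag list appended to the default
# flags. (The specific flags shown here are only illustrative.)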
red = "\033[1;31m"
green = "\033[1;32m"
clear = "\033[0m"
PASS = 'pass'
FAIL = 'fail'
INTERRUPT = 'interrupt'
def test_runner(test_name, test_closure, verbose, filename):
test_fn, test_args = test_closure
saved_temps = []
try:
test_fn(filename, verbose, *test_args)
except KeyboardInterrupt:
return test_name, filename, [], INTERRUPT, None
# except driver.CompilerException as e:
# if verbose:
# return test_name, filename, e.saved_temps, FAIL, ''.join(traceback.format_exception(*sys.exc_info()))
# return test_name, filename, e.saved_temps, FAIL, None
except Exception as e:
if verbose:
return test_name, filename, [], FAIL, ''.join(traceback.format_exception_only(*sys.exc_info()[:2]))
return test_name, filename, [], FAIL, None
else:
return test_name, filename, [], PASS, None
class Counter:
def __init__(self):
self.passed = 0
self.failed = 0
tests = [
('compile_fail', (test_compile_fail, ([],)),
(os.path.join('tests', 'compile_fail'),)),
('run_pass', (test_run_pass, ([],)),
(os.path.join('tests', 'run_pass'),
os.path.join('examples'),
)),
]
def run_all_tests(thread_count, verbose):
thread_pool = multiprocessing.Pool(thread_count)
results = []
# Run tests asynchronously.
for test_name, test_fn, test_dirs in tests:
test_paths = []
for test_dir in test_dirs:
if os.path.isfile(test_dir):
test_paths.append(test_dir)
else:
os.path.walk(
test_dir,
lambda args, dirname, names: test_paths.extend(
path
for name in sorted(names)
for path in [os.path.join(dirname, name)]
if os.path.isfile(path) and os.path.splitext(path)[1] in ('.lg', '.md')),
())
for test_path in test_paths:
results.append(thread_pool.apply_async(test_runner, (test_name, test_fn, verbose, test_path)))
test_counters = OrderedDict()
for test_name, test_fn, test_dirs in tests:
test_counter = Counter()
test_counters[test_name] = test_counter
all_saved_temps = []
try:
for result in results:
test_name, filename, saved_temps, outcome, output = result.get()
if len(saved_temps) > 0:
all_saved_temps.append((test_name, filename, saved_temps))
if outcome == PASS:
print '[%sPASS%s] (%s) %s' % (green, clear, test_name, filename)
if output is not None: print output
test_counters[test_name].passed += 1
elif outcome == FAIL:
print '[%sFAIL%s] (%s) %s' % (red, clear, test_name, filename)
if output is not None: print output
test_counters[test_name].failed += 1
else:
raise Exception('Unexpected test outcome %s' % outcome)
except KeyboardInterrupt:
raise
global_counter = Counter()
for test_counter in test_counters.itervalues():
global_counter.passed += test_counter.passed
global_counter.failed += test_counter.failed
global_total = global_counter.passed + global_counter.failed
if len(all_saved_temps) > 0:
print
print 'The following temporary files have been saved:'
print
for test_name, filename, saved_temps in all_saved_temps:
print '[%sFAIL%s] (%s) %s' % (red, clear, test_name, filename)
for saved_temp in saved_temps:
print ' %s' % saved_temp
if global_total > 0:
print
print 'Summary of test results by category:'
for test_name, test_counter in test_counters.iteritems():
test_total = test_counter.passed + test_counter.failed
if test_total > 0:
print '%24s: Passed %3d of %3d tests (%5.1f%%)' % (
'%s' % test_name, test_counter.passed, test_total,
float(100*test_counter.passed)/test_total)
print ' ' + '~'*54
print '%24s: Passed %3d of %3d tests (%5.1f%%)' % (
'total', global_counter.passed, global_total,
(float(100*global_counter.passed)/global_total))
if not verbose and global_counter.failed > 0:
print
print 'For detailed information on test failures, run:'
print ' ./test.py -j1 -v'
sys.exit(1)
def test_driver(argv):
parser = argparse.ArgumentParser(description = 'Legion compiler test suite')
parser.add_argument('-j',
nargs = '?',
type = int,
help = 'number threads used to compile',
dest = 'thread_count')
parser.add_argument('-v',
action = 'store_true',
help = 'display verbose output',
dest = 'verbose')
args = parser.parse_args(argv[1:])
run_all_tests(
args.thread_count,
args.verbose)
if __name__ == '__main__':
test_driver(sys.argv)
# =============================================================================
# File: shijian_examples_clocks.py
# Repo: wdbm/shijian  (license: gpl-3.0)
# =============================================================================
#!/usr/bin/env python
"""
################################################################################
# #
# shijian_examples_clocks #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program is shijian examples. #
# #
# copyright (C) 2014 Will Breaden Madden, [email protected] #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
import inspect
import time
import shijian
def main():
print("create clock alpha")
alpha = shijian.Clock(name = "alpha")
print("clock alpha start time: {time}".format(time = alpha.start_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock alpha current time (s): {time}".format(time = alpha.time()))
print("\ncreate clock beta")
beta = shijian.Clock(name = "beta")
print("clock beta start time: {time}".format(time = beta.start_time()))
print("clock beta stop time: {time}".format(time = beta.stop_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock beta current time (s): {time}".format(time = beta.time()))
print("stop clock beta")
beta.stop()
print("clock beta start time: {time}".format(time = beta.start_time()))
print("clock beta stop time: {time}".format(time = beta.stop_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock beta start time: {time}".format(time = beta.start_time()))
print("clock beta stop time: {time}".format(time = beta.stop_time()))
print("clock beta current time (s): {time}".format(time = beta.time()))
print("\nclock beta printout:\n")
beta.printout()
print("create two gamma clocks")
gamma = shijian.Clock(name = "gamma")
gamma = shijian.Clock(name = "gamma")
print("sleep 2 seconds")
time.sleep(2)
print("\ncreate two unnamed clocks")
delta = shijian.Clock()
epsilon = shijian.Clock()
print("sleep 2 seconds")
time.sleep(2)
print("\nrun function 1 (which is timed using internal clocks)")
print("result of function 1: {result}".format(result = function_1()))
print("\nrun function 2 (which is timed using a decorator)")
print("result of function 2: {result}".format(result = function_2()))
print("\ncreate clock zeta, to illustrate clock resets")
zeta = shijian.Clock(name = "zeta")
print("clock zeta start time: {time}".format(time = zeta.start_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock zeta current time (s): {time}".format(time = zeta.time()))
print("reset clock zeta and start it again")
zeta.reset()
zeta.start()
print("clock zeta start time: {time}".format(time = zeta.start_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock zeta current time (s): {time}".format(time = zeta.time()))
print("\nclocks full printout:\n")
shijian.clocks.printout(style = "full")
print("clocks statistics printout:\n")
shijian.clocks.printout()
def function_1():
function_name = inspect.stack()[0][3]
clock = shijian.Clock(name = function_name)
print("initiate {function_name}".format(function_name = function_name))
time.sleep(3)
print("terminate {function_name}".format(function_name = function_name))
clock.stop()
return(3)
@shijian.timer
def function_2():
function_name = inspect.stack()[0][3]
print("initiate {function_name}".format(function_name = function_name))
time.sleep(4)
print("terminate {function_name}".format(function_name = function_name))
return(4)
if __name__ == '__main__':
main()
# =============================================================================
# File: web/ScheduleViewer/migrations/0001_initial.py
# Repo: tstapler/Access-Plus-Schedule-Parser  (license: mit)
# =============================================================================
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('course_name', models.CharField(max_length=30)),
('course_number', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='MeetingTime',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('days', models.CharField(max_length=2, choices=[('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'WEDNESDAY'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday')])),
('time', models.DateField()),
('instructor', models.CharField(max_length=30)),
('location', models.CharField(max_length=30)),
('course', models.ForeignKey(to='ScheduleViewer.Course')),
],
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('semester', models.CharField(max_length=2, choices=[('fall', 'Fall'), ('spring', 'Spring'), ('summer', 'Summer')], default='fall')),
('year', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
],
),
migrations.AddField(
model_name='schedule',
name='student',
field=models.ForeignKey(to='ScheduleViewer.Student'),
),
migrations.AddField(
model_name='course',
name='schedule',
field=models.ForeignKey(to='ScheduleViewer.Schedule'),
),
]
# =============================================================================
# File: webapi.py
# Repo: cumc-dbmi/pmi_sprint_reporter  (license: mit)
# =============================================================================
"""
Utilities to configure WebAPI (backend for Atlas) to work with the database(s) loaded by reporter and achilles.
This module makes the following assumptions:
* WebAPI section of settings is valid
* The WebAPI database (referred to by `settings.webapi_conn_str`) already contains the application tables
"""
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import create_engine
import run_config
import settings
engine = create_engine(settings.webapi_conn_str)
metadata = MetaData(bind=engine, reflect=True)
source_table = Table('source', metadata, autoload=True)
source_daimon_table = Table('source_daimon', metadata, autoload=True)
def delete_sources():
"""
Remove all records from source and source_daimon tables
"""
delete_source_daimon = source_daimon_table.delete()
delete_source = source_table.delete()
engine.execute(delete_source_daimon)
engine.execute(delete_source)
def create_source(hpo_id, hpo_name):
"""
Insert source and source_daimon records associated with an HPO
:param hpo_id: ID of the HPO (see hpo.csv)
:param hpo_name: Name of the HPO (see hpo.csv)
"""
source_row = dict(SOURCE_NAME=hpo_name,
SOURCE_KEY=hpo_id,
SOURCE_CONNECTION=settings.cdm_jdbc_conn_str,
SOURCE_DIALECT=run_config.cdm_dialect)
insert_source = source_table.insert().returning(source_table.c.SOURCE_ID).values(source_row)
source_id = engine.execute(insert_source).lastrowid
cdm_daimon_row = dict(source_id=source_id, daimon_type=0, table_qualifier=hpo_id, priority=1)
vocab_daimon_row = dict(source_id=source_id, daimon_type=1, table_qualifier='dbo', priority=1)
results_daimon_row = dict(source_id=source_id, daimon_type=2, table_qualifier=hpo_id, priority=1)
source_daimon_rows = [cdm_daimon_row, vocab_daimon_row, results_daimon_row]
insert_source_daimon = source_daimon_table.insert().values(source_daimon_rows)
engine.execute(insert_source_daimon)
def main():
delete_sources()
for hpo in run_config.all_hpos.to_dict(orient='records'):
create_source(hpo['hpo_id'], hpo['name'])
if __name__ == '__main__':
main()
# =============================================================================
# File: pgctl/fuser.py
# Repo: Yelp/pgctl  (license: mit)
# =============================================================================
#!/usr/bin/env python2.7
"""\
usage: pgctl-fuser [-d] file [file ...]
Shows the pids (of the current user) that have this file opened.
This is useful for finding which processes hold a file lock (flock).
This has the same behavior as `lsof -t file`, but is *much* faster.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from .debug import trace
def stat(path):
from os import stat
try:
return stat(path)
except EnvironmentError as error:
trace('fuser suppressed: %s', error)
return None
def listdir(path):
from os import listdir
try:
return listdir(path)
except EnvironmentError as error:
trace('fuser suppressed: %s', error)
return ()
def fuser(path, allow_deleted=False):
"""Return the list of pids that have 'path' open, for the current user"""
search = stat(path)
if search is None and not allow_deleted:
return
from glob import glob
for fddir in glob('/proc/*/fd/'):
try:
pid = int(fddir.split('/', 3)[2])
except ValueError:
continue
fds = listdir(fddir)
for fd in fds:
from os.path import join
fd = join(fddir, fd)
found = stat(fd)
if found is None:
# fd disappeared since we listed
continue
if found == search:
yield pid
break
if allow_deleted and found.st_nlink == 0:
from os import readlink
if readlink(fd) == path + ' (deleted)':
yield pid
break
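# [Illustrative note, not part of the original file] A minimal sketch of
# calling fuser() from Python, assuming a lock file path (the path below is
# only an example); it walks /proc/*/fd/ and yields the pids of the current
# user's processes that hold the file open:
#
#   from pgctl.fuser import fuser
#   holders = sorted(fuser('playground/sleeper/.s6-svscan/lock'))
#   print(holders)  # e.g. [12345]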
def main(args=None):
from argparse import ArgumentParser
from sys import argv
args = args or argv
parser = ArgumentParser(description=__doc__)
parser.add_argument('-d', '--allow-deleted', action='store_true', help='allow deleted files')
parser.add_argument('file', nargs='+')
args = parser.parse_args(args[1:])
for f in args.file:
for pid in fuser(f, allow_deleted=args.allow_deleted):
print(pid)
if __name__ == '__main__':
exit(main())
# =============================================================================
# File: var/spack/repos/builtin/packages/ea-utils/package.py
# Repo: skosukhin/spack  (license: lgpl-2.1)
# =============================================================================
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class EaUtils(MakefilePackage):
"""Command-line tools for processing biological sequencing data. Barcode
demultiplexing, adapter trimming, etc. Primarily written to support an
Illumina based pipeline - but should work with any FASTQs."""
homepage = "http://expressionanalysis.github.io/ea-utils/"
url = "https://github.com/ExpressionAnalysis/ea-utils/archive/1.04.807.tar.gz"
version('1.04.807', '5972b9f712920603b7527f46c0063a09')
depends_on('subversion')
depends_on('zlib')
depends_on('gsl')
depends_on('bamtools')
# perl module required for make check, which is included in the default
# target
depends_on('perl', type='build')
build_directory = 'clipper'
def edit(self, spec, prefix):
with working_dir('clipper'):
makefile = FileFilter('Makefile')
makefile.filter('/usr', prefix)
# =============================================================================
# File: tensorflow/python/training/input.py
# Repo: MehdiSfr/tensor-flow  (license: apache-2.0)
# =============================================================================
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import queue_runner
def match_filenames_once(pattern, name=None):
"""Save the list of files matching pattern, so it is only computed once.
Args:
pattern: A file pattern (glob).
name: A name for the operations (optional).
Returns:
A variable that is initialized to the list of files matching pattern.
"""
with ops.op_scope([pattern], name, "matching_filenames") as name:
return variables.Variable(io_ops.matching_files(pattern), trainable=False,
name=name, validate_shape=False)
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor `num_epochs` times and then raises an `OutOfRange` error.
Args:
tensor: Any `Tensor`.
num_epochs: An integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or `OutOfRange`.
"""
if num_epochs is None:
return tensor
if num_epochs <= 0:
raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
with ops.op_scope([tensor], name, "limit_epochs") as name:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
epochs = variables.Variable(zero64, name="epochs")
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
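# [Illustrative note, not part of the original file] A hedged sketch of what
# limit_epochs() does when used through the tf.train namespace of this
# TensorFlow generation: the returned tensor can be evaluated at most
# `num_epochs` times before an OutOfRange error is raised.
#
#   limited = tf.train.limit_epochs(tf.constant("batch"), num_epochs=2)
#   with tf.Session() as sess:
#       sess.run(tf.initialize_all_variables())
#       sess.run(limited)  # ok
#       sess.run(limited)  # ok
#       sess.run(limited)  # raises tf.errors.OutOfRangeError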
def _input_producer(input_tensor, dtype, num_epochs, shuffle, seed, capacity,
name, summary_name):
if shuffle:
input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
input_tensor = limit_epochs(input_tensor, num_epochs)
q = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=[dtype], shapes=[[]],
name=name)
enq = q.enqueue_many([input_tensor])
queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
summary_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
math_ops.cast(q.size(), dtypes.float32) *
(1. / capacity))
return q
def string_input_producer(string_tensor, num_epochs=None, shuffle=True,
seed=None, capacity=32, name=None):
"""Output strings (e.g. filenames) to a queue for an input pipeline.
Args:
string_tensor: A 1-D string tensor with the strings to produce.
num_epochs: An integer (optional). If specified, `string_input_producer`
produces each string from `string_tensor` `num_epochs` times before
generating an OutOfRange error. If not specified, `string_input_producer`
can cycle through the strings in `string_tensor` an unlimited number of
times.
shuffle: Boolean. If true, the strings are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A queue with the output strings. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.op_scope([string_tensor], name, "input_producer") as name:
return _input_producer(
string_tensor, dtypes.string, num_epochs, shuffle, seed, capacity, name,
"fraction_of_%d_full" % capacity)
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, name=None):
"""Produces the integers from 0 to limit-1 in a queue.
Args:
limit: An int32 scalar tensor.
num_epochs: An integer (optional). If specified, `range_input_producer`
produces each integer `num_epochs` times before generating an
OutOfRange error. If not specified, `range_input_producer` can cycle
through the integers an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A Queue with the output integers. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.op_scope([limit], name, "input_producer") as name:
range_tensor = math_ops.range(limit)
return _input_producer(
range_tensor, dtypes.int32, num_epochs, shuffle, seed, capacity, name,
"fraction_of_%d_full" % capacity)
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
capacity=32, name=None):
"""Produces a slice of each `Tensor` in `tensor_list`.
Implemented using a Queue -- a `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
Args:
tensor_list: A list of `Tensor` objects. Every `Tensor` in
`tensor_list` must have the same size in the first dimension.
num_epochs: An integer (optional). If specified, `slice_input_producer`
produces each slice `num_epochs` times before generating
an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
through the slices an unlimited number of times.
    shuffle: Boolean. If true, the slices are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A list of tensors, one for each element of `tensor_list`. If the tensor
in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output
tensor will have shape `[a, b, ..., z]`.
"""
with ops.op_scope(tensor_list, name, "input_producer"):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError(
"Expected at least one tensor in slice_input_producer().")
range_size = array_ops.shape(tensor_list[0])[0]
# TODO(josh11b): Add an assertion that the first dimension of
# everything in TensorList matches. Maybe just check the inferred shapes?
queue = range_input_producer(range_size, num_epochs=num_epochs,
shuffle=shuffle, seed=seed, capacity=capacity)
index = queue.dequeue()
output = [array_ops.gather(t, index) for t in tensor_list]
return output
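# [Illustrative note, not part of the original file] A hedged sketch of
# slice_input_producer(): given tensors that share their first dimension, it
# yields one aligned slice of each per dequeue (here one image/label pair).
# The shapes below are assumed example values only:
#
#   # images: [N, 32, 32, 3], labels: [N]
#   image, label = tf.train.slice_input_producer([images, labels],
#                                                num_epochs=1, shuffle=True)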
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
def _validate(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in batch().")
return tensor_list
def _validate_join(tensor_list_list):
tensor_list_list = [ops.convert_n_to_tensor_or_indexed_slices(tl)
for tl in tensor_list_list]
if not tensor_list_list:
raise ValueError("Expected at least one input in batch_join().")
return tensor_list_list
def _dtypes(tensor_list_list):
all_types = [[t.dtype for t in tl] for tl in tensor_list_list]
types = all_types[0]
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
", ".join(x.name for x in types),
", ".join(x.name for x in other_types))
return types
def _merge_shapes(shape_list, enqueue_many):
shape_list = [tensor_shape.as_shape(s) for s in shape_list]
if enqueue_many:
# We want the shapes without the leading batch dimension.
shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
merged_shape = shape_list[0]
for s in shape_list[1:]:
merged_shape.merge_with(s)
return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
if shapes is None:
l = len(tensor_list_list[0])
shapes = [_merge_shapes(
[tl[i].get_shape().as_list() for tl in tensor_list_list], enqueue_many)
for i in xrange(l)]
return shapes
def _enqueue_join(queue, tensor_list_list, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tl) for tl in tensor_list_list]
else:
enqueue_ops = [queue.enqueue(tl) for tl in tensor_list_list]
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tensor_list)] * threads
else:
enqueue_ops = [queue.enqueue(tensor_list)] * threads
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
# Batching functions ----------------------------------------------------------
def batch(tensor_list, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, name=None):
"""Creates batches of tensors in `tensor_list`.
This function is implemented using a queue. A `QueueRunner` for the
queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`. The `capacity` argument
  controls how long the prefetching is allowed to grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
num_threads: The number of threads enqueuing `tensor_list`.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "batch") as name:
tensor_list = _validate(tensor_list)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
summary_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
return queue.dequeue_many(batch_size, name=name)
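# [Illustrative note, not part of the original file] A usage sketch for
# batch(), mirroring the shuffle_batch() example further down but without
# shuffling; single examples are assembled into batches of 32. The input
# tensors are assumed to come from a reader/decoder defined elsewhere:
#
#   image_batch, label_batch = tf.train.batch(
#       [single_image, single_label],
#       batch_size=32,
#       num_threads=4,
#       capacity=1000)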
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, name=None):
"""Runs a list of tensors to fill a queue to create batches of examples.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
`len(tensor_list_list)` threads will be started,
with thread `i` enqueuing the tensors from
`tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first
dimension if `enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. The slices of any input tensor
`x` are treated as examples, and the output tensors will have shape
`[batch_size] + x.shape[1:]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as
`tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes)
_enqueue_join(queue, tensor_list_list, enqueue_many)
summary_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
return queue.dequeue_many(batch_size, name=name)
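# [Illustrative note, not part of the original file] A hedged sketch of
# batch_join(): one (example, label) pair per reader, with one enqueuing
# thread started per element of the outer list. read_my_file_format() is an
# assumed helper returning a single parsed example and label:
#
#   example_list = [read_my_file_format(filename_queue) for _ in range(4)]
#   example_batch, label_batch = tf.train.batch_join(example_list,
#                                                    batch_size=32,
#                                                    capacity=1000)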
def shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
name=None):
"""Creates batches by randomly shuffling tensors.
This function adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list`.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
For example:
```python
# Creates batches of 32 images and 32 labels.
image_batch, label_batch = tf.train.shuffle_batch(
[single_image, single_label],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
```
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
tensor_list = _validate(tensor_list)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
summary_ops.scalar_summary(summary_name, full)
return queue.dequeue_many(batch_size, name=name)
def shuffle_batch_join(tensor_list_list, batch_size, capacity,
min_after_dequeue, seed=None, enqueue_many=False,
shapes=None, name=None):
"""Create batches by randomly shuffling tensors.
This version enqueues a different list of tensors in different threads.
It adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list_list`.
`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor with shape `[x, y,
z]` will be output as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. If an input tensor has shape `[*, x,
y, z]`, the output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(
_flatten(tensor_list_list), name, "shuffle_batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes)
_enqueue_join(queue, tensor_list_list, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
summary_ops.scalar_summary(summary_name, full)
return queue.dequeue_many(batch_size, name=name)
# =============================================================================
# File: flask_cake/tests/test_cake.py
# Repo: rsenk330/Flask-Cake  (license: bsd-2-clause)
# =============================================================================
import pytest
try:
from unittest import mock # Python 3
except ImportError:
import mock
@pytest.fixture
def app(tmpdir):
from flask import Flask
root_path = tmpdir.ensure("test-proj", dir=True)
tmpdir.ensure("test-proj/static/coffee", dir=True)
p = tmpdir.join("test-proj/static/coffee", "Cakefile")
p.write("")
app = Flask(__name__)
app.root_path = root_path.strpath
return app
def test_cake_init(app):
from flask_cake import Cake
cake = Cake(app)
assert cake.app == app
assert cake.tasks == ["build"]
assert cake.cake_parent == "coffee"
def test_watchdog(app, tmpdir):
from flask_cake import Cake
with mock.patch("watchdog.observers.Observer.schedule") as mock_schedule:
Cake(app)
cake_dir = tmpdir.join("test-proj/static/coffee").strpath
mock_schedule.assert_called_once_with(mock.ANY, path=cake_dir, recursive=True)
def test_events_on_any_event(app):
from flask_cake.cake import Events
e = Events(app.root_path, tasks=["build"])
with mock.patch("flask_cake.cake.subprocess") as subprocess:
e.on_any_event(None)
subprocess.Popen.assert_called_once_with(["cake", "build"], cwd=app.root_path, stdout=mock.ANY)
def test_events_on_any_event_str(app):
from flask_cake.cake import Events
e = Events(app.root_path, tasks="build")
with mock.patch("flask_cake.cake.subprocess") as subprocess:
e.on_any_event(None)
subprocess.Popen.assert_called_once_with(["cake", "build"], cwd=app.root_path, stdout=mock.ANY)
# =============================================================================
# File: api/tests/opentrons/calibration/tip_length/test_tip_length_calibration.py
# Repo: OpenTrons/opentrons_sdk  (license: apache-2.0)
# =============================================================================
import pytest
from typing import List, Tuple
from opentrons.calibration.tip_length import state_machine
valid_transitions: List[Tuple[str, str, str]] = [
('loadLabware', 'sessionStarted', 'labwareLoaded'),
('moveToMeasureNozzleOffset', 'labwareLoaded', 'measuringNozzleOffset'),
('jog', 'measuringNozzleOffset', 'measuringNozzleOffset'),
('saveNozzlePosition', 'measuringNozzleOffset', 'preparingPipette'),
('jog', 'preparingPipette', 'preparingPipette'),
('pickUpTip', 'preparingPipette', 'inspectingTip'),
('invalidateTip', 'inspectingTip', 'preparingPipette'),
('confirmTip', 'inspectingTip', 'measuringTipOffset'),
('jog', 'measuringTipOffset', 'measuringTipOffset'),
('saveTipPosition', 'measuringTipOffset', 'calibrationComplete'),
('exitSession', 'calibrationComplete', 'sessionExited'),
('exitSession', 'sessionStarted', 'sessionExited'),
('exitSession', 'labwareLoaded', 'sessionExited'),
('exitSession', 'measuringNozzleOffset', 'sessionExited'),
('exitSession', 'preparingPipette', 'sessionExited'),
('exitSession', 'inspectingTip', 'sessionExited'),
('exitSession', 'measuringTipOffset', 'sessionExited'),
]
@pytest.mark.parametrize('trigger,from_state,to_state', valid_transitions)
async def test_valid_transitions(trigger, from_state, to_state):
sm = state_machine.TipCalibrationStateMachine(initial_state=from_state)
await sm.trigger_transition(trigger)
assert sm.current_state_name == to_state
# =============================================================================
# File: crawlers/crawlers/spiders/lq7m.py
# Repo: jackliusr/scrapy-crawlers  (license: apache-2.0)
# =============================================================================
import scrapy
from selenium import webdriver
from scrapy.http import JsonRequest, Request
from scrapy import Selector
import time
import json
class Lq7mSpider(scrapy.Spider):
name = 'lq7m'
start_urls = ['http://lq.7m.com.cn/list/3/2.shtml']
custom_settings = {
'DATABASEPIPELINE_ENABLED': True,
}
def start_requests(self):
for i in range(2,200):
yield Request(url=f"http://lq.7m.com.cn/list/3/{i}.shtml", callback=self.parseList)
def parseList(self, response):
sel = Selector(response)
urls = sel.xpath("//div[@class=\"cb_l\"]//a[contains(@href, '/news/')]/@href").extract()
for url in urls:
yield Request(url=f"http://lq.7m.com.cn{url}",callback=self.parsePage)
def parsePage(self,response):
sel = Selector(response)
        title = ''.join(sel.xpath("//div[@class=\"pa_tec\"]/h1/text()").extract()).strip()
content = ''.join(sel.xpath("//div[@class=\"n_zi\"]//text()").extract()).strip()
pubTimeTmp = (sel.xpath("//div[@class=\"pa_tek\"]/div[@class=\"pa_tec\"]/p[1]/text()").extract_first())
pubTime = pubTimeTmp[15:26]
keywords = sel.xpath("//meta[@name='keywords']/@content")[0].extract()
description = sel.xpath("//meta[@name='description']/@content")[0].extract()
image= sel.xpath("//div[@class=\"n_zi\"]//img[1]/@src")
category = 2
if image:
image_url = f"http://lq.7m.com.cn{image[0].extract()}"
yield {
"title": title,
"content": content,
"pubTime": pubTime,
"keywords": keywords,
"description": description,
'category': category,
"images": [image_url],
"image_urls": [image_url],
}
else:
yield {
"title": title,
"content": content,
"pubTime": pubTime,
"keywords": keywords,
"description": description,
'category': category,
}
# =============================================================================
# File: enki2/enkilib/python/parser.py
# Repo: ColdenCullen/d2dl  (license: mit)
# =============================================================================
#
# Copyright (c) 2008 Eric Anderton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ParseException is raised below but was neither defined nor imported in this
# file; a minimal definition is added here so the module is self-contained.
class ParseException(Exception):
    def __init__(self, message, position, found):
        Exception.__init__(self, message, position, found)
        self.message = message
        self.position = position
        self.found = found
class Parser:
def parse(self,input):
self.input = input
self.position = 0
return self.parse_Syntax()
def eoi(self):
if self.position >= len(self.input):
return True
return False
def any(self):
if self.position >= len(self.input):
return False
self.position = self.position + 1
return True
def peek(self):
if self.position >= len(self.input):
return ""
else:
return self.input[self.position]
def DEBUG(self,text=""):
def inner():
print text,self.position,self.input[self.position:]
return True
return inner
def REQUIRED(self,text,term=None):
def inner():
if term != None and term():
return True
raise ParseException(text,self.position,self.peek())
return inner
def TERMINAL(self,value,err=None):
def inner():
#print "len: ",len(self.input)," pos: ",self.position,"(",self.input[self.position:],") val: ",value
if self.position == len(self.input):
                if err != None:
                    raise ParseException(err, self.position, self.peek())
return False
if self.input[self.position:].startswith(value):
self.position += len(value);
#print "matched: ",value," moved to: ",self.position
return True
return False
return inner
def RANGE(self,start,end):
def inner():
#print "len: ",len(self.input)," pos: ",self.position,"(",self.input[self.position:],") range: ",start,"-",end,
if self.position == len(self.input):
return False
ch = self.input[self.position]
if ch >= start[0] and ch <= end[0]:
self.position = self.position + 1
#print "matched: ",start,"-",end," moved to: ",self.position
return True
return False
return inner
def AND(self,*args):
def inner():
pos = self.position
for term in args:
if not term():
self.position = pos
return False
return True
return inner
def OR(self,*args):
def inner():
for term in args:
if term():
return True
return False
return inner
def OPTIONAL(self,term):
def inner():
term()
return True
return inner
def NOT(self,term):
def inner():
pos = self.position
if term():
self.position = pos
return False
return True
return inner
def ZEROORMORE(self,term,terminator = None,err=None):
def inner():
if terminator == None:
while(not self.eoi() and term()):
pass
else:
while(not self.eoi() and not terminator() and term()):
pass
return True
return inner
def ONEORMORE(self,term,terminator = None):
def inner():
pos = self.position
if terminator and terminator():
self.position = pos
return False
if not term():
self.position = pos
return False
if terminator == None:
while(not self.eoi() and term()):
pass
else:
while(not self.eoi() and not terminator() and term()):
pass
return True
        return inner
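# [Illustrative note, not part of the original file] The Parser class above is
# a combinator base class: a generated subclass is expected to provide
# parse_Syntax() built from TERMINAL/AND/OR/ONEORMORE/etc. A hedged,
# hand-written sketch of such a subclass for a trivial grammar ("a", then "b",
# then one or more "c", then end of input):
#
#   class TinyParser(Parser):
#       def parse_Syntax(self):
#           return self.AND(
#               self.TERMINAL("a"),
#               self.TERMINAL("b"),
#               self.ONEORMORE(self.TERMINAL("c")),
#               self.REQUIRED("expected end of input", self.NOT(self.any)),
#           )()
#
#   TinyParser().parse("abccc")  # -> True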
# =============================================================================
# File: cerbero/build/filesprovider.py
# Repo: davibe/cerbero  (license: lgpl-2.1)
# =============================================================================
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import inspect
from cerbero.config import Platform
from cerbero.utils import shell
class FilesProvider(object):
'''
List files by categories using class attributes named files_$category and
platform_files_$category
'''
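    # [Illustrative note, not part of the original file] A hedged sketch of the
    # naming convention described above, as a recipe-style subclass might use
    # it; the attribute suffix after "files_" / "platform_files_" is the
    # category name that files_list_by_category() looks up:
    #
    #   class Recipe(FilesProvider):
    #       files_libs = ['libfoo']
    #       files_bins = ['foo-tool']
    #       platform_files_libs = {Platform.WINDOWS: ['libfoo-extra']}
    #
    # Here 'libfoo' is expanded with the platform's shared-library search
    # pattern and 'foo-tool' gets the platform binary extension (.exe on
    # Windows, none on Linux).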
LIBS_CAT = 'libs'
BINS_CAT = 'bins'
PY_CAT = 'python'
DEVEL_CAT = 'devel'
LANG_CAT = 'lang'
TYPELIB_CAT = 'typelibs'
EXTENSIONS = {
Platform.WINDOWS: {'bext': '.exe', 'sext': '*-*.dll', 'sdir': 'bin',
'mext': '.dll', 'smext': '.a', 'pext': '.pyd', 'srext': '.dll'},
Platform.LINUX: {'bext': '', 'sext': '.so.*', 'sdir': 'lib',
'mext': '.so', 'smext': '.a', 'pext': '.so', 'srext': '.so'},
Platform.ANDROID: {'bext': '', 'sext': '.so.*', 'sdir': 'lib',
'mext': '.so', 'smext': '.a', 'pext': '.so', 'srext': '.so'},
Platform.DARWIN: {'bext': '', 'sext': '.*.dylib', 'sdir': 'lib',
'mext': '.so', 'smext': '.a', 'pext': '.so', 'srext': '.dylib'},
Platform.IOS: {'bext': '', 'sext': '.*.dylib', 'sdir': 'lib',
'mext': '.so', 'smext': '.a', 'pext': '.so', 'srext': '.dylib'}}
def __init__(self, config):
self.config = config
self.platform = config.target_platform
self.extensions = self.EXTENSIONS[self.platform]
self.py_prefix = config.py_prefix
self.categories = self._files_categories()
self._searchfuncs = {self.LIBS_CAT: self._search_libraries,
self.BINS_CAT: self._search_binaries,
self.PY_CAT: self._search_pyfiles,
self.LANG_CAT: self._search_langfiles,
self.TYPELIB_CAT: self._search_typelibfiles,
'default': self._search_files}
def devel_files_list(self):
'''
        Return the list of development files, which consists of the files and
directories listed in the 'devel' category and the link libraries .a,
.la and .so from the 'libs' category
'''
devfiles = self.files_list_by_category(self.DEVEL_CAT)
devfiles.extend(self.files_list_by_category(self.TYPELIB_CAT))
devfiles.extend(self._search_devel_libraries())
return sorted(list(set(devfiles)))
def dist_files_list(self):
'''
Return the list of files that should be included in a distribution
        tarball, which includes all files except the development files
'''
return self.files_list_by_categories(
[x for x in self.categories if x != self.DEVEL_CAT])
def files_list(self):
'''
Return the complete list of files
'''
files = self.dist_files_list()
files.extend(self.devel_files_list())
return sorted(list(set(files)))
def files_list_by_categories(self, categories):
'''
Return the list of files in a list categories
'''
files = []
for cat in categories:
files.extend(self._list_files_by_category(cat))
return sorted(list(set(files)))
def files_list_by_category(self, category):
'''
Return the list of files in a given category
'''
return self.files_list_by_categories([category])
def libraries(self):
'''
Return a list of the libraries
'''
return self.files_list_by_category(self.LIBS_CAT)
def use_gobject_introspection(self):
return self.TYPELIB_CAT in self._files_categories()
def _files_categories(self):
''' Get the list of categories available '''
categories = []
for name, value in inspect.getmembers(self):
if (isinstance(value, list) or isinstance(value, dict)):
if name.startswith('files_'):
categories.append(name.split('files_')[1])
if name.startswith('platform_files_'):
categories.append(name.split('platform_files_')[1])
return sorted(list(set(categories)))
def _get_category_files_list(self, category):
'''
Get the raw list of files in a category, without pattern match nor
extensions replacement, which should be done in the search function
'''
files = []
for attr in dir(self):
if attr.startswith('files_') and attr.endswith('_' + category):
files.extend(getattr(self, attr))
if attr.startswith('platform_files_') and \
attr.endswith('_' + category):
files.extend(getattr(self, attr).get(self.platform, []))
return files
def _list_files_by_category(self, category):
search_category = category
if category.startswith(self.LIBS_CAT + '_'):
search_category = self.LIBS_CAT
search = self._searchfuncs.get(search_category,
self._searchfuncs['default'])
return search(self._get_category_files_list(category))
def _search_files(self, files):
'''
Search files in the prefix, doing the extension replacements and
listing directories
'''
# replace extensions
fs = [f % self.extensions for f in files]
# fill directories
dirs = [x for x in fs if
os.path.isdir(os.path.join(self.config.prefix, x))]
for directory in dirs:
fs.remove(directory)
fs.extend(self._ls_dir(os.path.join(self.config.prefix,
directory)))
# fill paths with pattern expansion *
paths = [x for x in fs if '*' in x]
if len(paths) != 0:
for path in paths:
fs.remove(path)
fs.extend(shell.ls_files(paths, self.config.prefix))
return fs
def _search_binaries(self, files):
'''
        Search binaries in the prefix. This function doesn't do any real search
        like the others; it only prepends the bin/ path and adds the binary
        extension to the given list of files
'''
binaries = []
for f in files:
self.extensions['file'] = f
binaries.append('bin/%(file)s%(bext)s' % self.extensions)
return binaries
def _search_libraries(self, files):
'''
Search libraries in the prefix. Unfortunately the filename might vary
depending on the platform and we need to match the library name and
        its extension. There is a corner case on Windows where a library might
be named libfoo.dll or libfoo-1.dll
'''
if len(files) == 0:
return []
dlls = []
# on windows check libfoo.dll too instead of only libfoo-x.dll
if self.config.target_platform == Platform.WINDOWS:
pattern = '%(sdir)s/%%s.dll' % self.extensions
for f in files:
path = os.path.join(self.config.prefix, pattern % f)
if os.path.exists(path):
dlls.append(pattern % f)
files = list(set(files) - set(dlls))
pattern = '%(sdir)s/%(file)s%(sext)s'
libsmatch = []
for f in files:
self.extensions['file'] = f
libsmatch.append(pattern % self.extensions)
return shell.ls_files(libsmatch, self.config.prefix) + dlls
def _search_pyfiles(self, files):
'''
Search for python files in the prefix. This function doesn't do any
        real search; it only prepends the lib/Python$PYVERSION/site-packages/
path to the given list of files
'''
pyfiles = []
for f in files:
f = f % self.extensions
f = '%s/%s' % (self.py_prefix, f)
pyfiles.append(f)
if f.endswith('.py'):
for e in ['o', 'c']:
fe = f + e
if os.path.exists(os.path.join(self.config.prefix, fe)):
pyfiles.append(fe)
return pyfiles
def _search_langfiles(self, files):
'''
        Search for translations in share/locale/*/LC_MESSAGES/
'''
pattern = 'share/locale/*/LC_MESSAGES/%s.mo'
return shell.ls_files([pattern % x for x in files],
self.config.prefix)
def _search_typelibfiles(self, files):
'''
Search for typelibs in lib/girepository-1.0/
'''
if not self.config.variants.gi:
return []
pattern = 'lib/girepository-1.0/%s.typelib'
return shell.ls_files([pattern % x for x in files],
self.config.prefix)
def _search_devel_libraries(self):
devel_libs = []
for category in self.categories:
if category != self.LIBS_CAT and \
not category.startswith(self.LIBS_CAT + '_'):
continue
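            # %(f)s is the library name as given (e.g. libfoo) and %(fnolib)s the
            # same name without the 'lib' prefix (used for MSVC-style .lib files);
            # platform specific suffixes are appended below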
pattern = 'lib/%(f)s.a lib/%(f)s.la '
if self.platform == Platform.LINUX:
pattern += 'lib/%(f)s.so '
elif self.platform == Platform.WINDOWS:
pattern += 'lib/%(f)s.dll.a '
pattern += 'lib/%(f)s.def '
pattern += 'lib/%(fnolib)s.lib '
elif self.platform in [Platform.DARWIN, Platform.IOS]:
pattern += 'lib/%(f)s.dylib '
libsmatch = [pattern % {'f': x, 'fnolib': x[3:]} for x in
self._get_category_files_list(category)]
devel_libs.extend(shell.ls_files(libsmatch, self.config.prefix))
return devel_libs
def _ls_dir(self, dirpath):
files = []
for root, dirnames, filenames in os.walk(dirpath):
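            # make the collected paths relative to the configured prefix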
_root = root.split(self.config.prefix)[1]
if _root[0] == '/':
_root = _root[1:]
files.extend([os.path.join(_root, x) for x in filenames])
return files
| lgpl-2.1 | -8,284,127,028,015,997,000 | 37.852518 | 79 | 0.561522 | false |
unioslo/cerebrum | Cerebrum/modules/cis/NMHIndividuation.py | 1 | 5117 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Functionality for the Individuation project that is specific to NMH.
"""
from __future__ import unicode_literals
import cereconf
from Cerebrum import Errors
from Cerebrum.Utils import Factory
from Cerebrum.modules.cis import Individuation
class Individuation(Individuation.Individuation):
"""NMH specific behaviour for the individuation service."""
# The subject of the warning e-mails
email_subject = 'Failed password recovery attempt'
# The signature of the warning e-mails
email_signature = 'Høgskolen i Harstad'
# The from address
email_from = '[email protected]'
# The feedback messages for NMH
messages = {
'error_unknown': {'en': u'An unknown error occured',
'no': u'En ukjent feil oppstod'},
'person_notfound': {'en': u'Could not find a person by given data, please try again. Please note that you will ' +
u'not be able to use this service if you are reserved from being published on NMH\'s ' +
u'web pages.',
'no': (u'Kunne ikke finne personen ut fra oppgitte data, vennligst prøv igjen. Merk at du ' +
u'ikke kan bruke denne tjenesten om du har reservert deg fra å bli publisert på NMH ' +
u'sine nettsider.')},
'person_notfound_usernames':
{'en': u'You are either reserved or have given wrong information.'
u' If you are reserved, an SMS have been sent to you, as'
u' long as your cell phone number is registered in our'
u' systems.',
'no': u'Du er reservert eller har gitt feil info. Hvis du er'
u' reservert skal du nå ha mottatt en SMS, såfremt ditt'
u' mobilnummer er registrert i våre systemer.'},
'person_miss_info': {'en': u'Not all your information is available. Please contact your HR department or student office.',
'no': u'Ikke all din informasjon er tilgjengelig. Vennligst ta kontakt med din personalavdeling eller studentkontor.'},
'account_blocked': {'en': u'This account is inactive. Please contact your local IT.',
'no': u'Denne brukerkontoen er ikke aktiv. Vennligst ta kontakt med din lokale IT-avdeling.'},
'account_reserved': {'en': u'You are reserved from using this service. Please contact your local IT.',
'no': u'Du er reservert fra å bruke denne tjenesten. Vennligst ta kontakt med din lokale IT-avdeling.'},
'account_self_reserved': {'en': u'You have reserved yourself from using this service. Please contact your local IT.',
'no': u'Du har reservert deg fra å bruke denne tjenesten. Vennligst ta kontakt med din lokale IT-avdeling.'},
'token_notsent': {'en': u'Could not send the one time password to your phone',
'no': u'Kunne ikke sende engangspassord til telefonen'},
'toomanyattempts': {'en': u'Too many attempts. You have temporarily been blocked from this service',
'no': u'For mange forsøk. Du er midlertidig utestengt fra denne tjenesten'},
'toomanyattempts_check': {'en': u'Too many attempts, one time password got invalid',
'no': u'For mange forsøk, engangspassordet er blitt gjort ugyldig'},
'timeout_check': {'en': u'Timeout, one time password got invalid',
'no': u'Tidsavbrudd, engangspassord ble gjort ugyldig'},
'fresh_phonenumber': {'en': u'Your phone number has recently been changed, which can not, due to security reasons, be used ' +
u'in a few days. Please contact your local IT-department.',
'no': u'Ditt mobilnummer er nylig byttet, og kan av sikkerhetsmessige årsaker ikke ' +
u'benyttes før etter noen dager. Vennlighst ta kontakt med din lokale IT-avdeling.'},
'password_invalid': {'en': u'Bad password: %s',
'no': u'Ugyldig passord: %s'},
}
| gpl-2.0 | 7,199,006,522,862,516,000 | 58.348837 | 149 | 0.619122 | false |
benwebber/ansible-tap | library/callback_plugins/tap.py | 1 | 4807 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
unicode_literals,
)
import collections
from enum import Enum
import sys
import yaml
from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import stringc
__metaclass__ = type
def indent(text, indent=2, char=' '):
return '\n'.join('{indent}{line}'.format(indent=indent*char, line=line)
for line in text.splitlines())
def dump_yaml(data, **kwargs):
return yaml.dump(data, Dumper=AnsibleDumper, allow_unicode=True,
default_flow_style=False, explicit_start=True,
explicit_end=True, **kwargs).strip()
def clean_tags(tags):
return [tag.lower() for tag in tags]
def is_todo(task):
return Tag.TODO.value in clean_tags(task.tags)
def is_diagnostic(task):
return Tag.DIAGNOSTIC.value in clean_tags(task.tags)
class TestResult(Enum):
PASSED = ('passed',)
FAILED = ('failed',)
EXPECTED = ('expected',)
UNEXPECTED = ('unexpected',)
SKIPPED = ('skipped',)
class Tag(Enum):
TODO = 'todo'
DIAGNOSTIC = 'diagnostic'
class CallbackModule(CallbackBase):
"""
TAP output for Ansible.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'tap'
if sys.stdout.isatty():
OK = stringc('ok', C.COLOR_OK)
NOT_OK = stringc('not ok', C.COLOR_ERROR)
else:
OK = 'ok'
NOT_OK = 'not ok'
def __init__(self):
super(CallbackModule, self).__init__()
# Play stats will include all tasks. We want to exclude setup/teardown
# tasks (tagged with 'diagnostic') from the final test count.
self.counter = collections.Counter()
@classmethod
def ok(cls, result):
"""
Render a passed test.
"""
directive = '# TODO' if is_todo(result._task) else None
description = cls._describe(result)
return cls._tap(cls.OK, description, directive=directive)
@classmethod
def skip(cls, result):
"""
Render a skipped test.
"""
description = cls._describe(result)
reason = result._result.get('skip_reason', result._result.get('skipped_reason', None))
directive = '# SKIP {}'.format(reason) if reason else '# SKIP'
return cls._tap(cls.OK, description, directive=directive)
@classmethod
def not_ok(cls, result):
"""
Render a failed test.
"""
directive = '# TODO' if is_todo(result._task) else None
description = cls._describe(result)
return cls._tap(cls.NOT_OK, description, directive=directive)
@staticmethod
def _describe(result):
"""
Construct a test line description based on the name of the Ansible
module and task name.
"""
description = '{}'.format(result._task.action)
if result._task.name:
description = '{}: {}'.format(description, result._task.name)
return description
@staticmethod
def _tap(status, description, directive=None):
"""
Render a TAP test line.
"""
test_line = '{} - {}'.format(status, description)
if directive:
test_line += ' {}'.format(directive)
lines = [test_line]
return '\n'.join(lines)
def v2_playbook_on_start(self, playbook):
self._display.display('TAP version 13')
def v2_runner_on_failed(self, result, ignore_errors=False):
self._display.display(self.not_ok(result))
# Print reason for failure if this was not an expected failure.
status = TestResult.EXPECTED if is_todo(result._task) else TestResult.FAILED
if status == TestResult.FAILED:
self._display.display(indent(dump_yaml(result._result)))
self.counter.update(status.value)
def v2_runner_on_ok(self, result):
if is_diagnostic(result._task):
self._display.display('# {}'.format(self._describe(result)))
return
status = TestResult.UNEXPECTED if is_todo(result._task) else TestResult.PASSED
self.counter.update(status.value)
self._display.display(self.ok(result))
def v2_runner_on_skipped(self, result):
self._display.display(self.skip(result))
self.counter.update(TestResult.SKIPPED.value)
def v2_playbook_on_stats(self, stats):
self._display.display('1..{}'.format(sum(self.counter.values())))
# Because tests set `ignore_errors`, we need to call exit() ourselves.
if self.counter['failed']:
sys.exit(TaskQueueManager.RUN_FAILED_HOSTS)
| mit | 1,714,229,715,713,902,000 | 29.232704 | 94 | 0.620137 | false |
benagricola/exabgp | lib/exabgp/bgp/message/update/nlri/qualifier/etag.py | 1 | 1397 | # encoding: utf-8
"""
etag.py
Created by Thomas Mangin on 2014-06-26.
Copyright (c) 2014-2015 Orange. All rights reserved.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
# TODO: take into account E-VPN specs that specify the role of the first bit of ESI
# (since draft-ietf-l2vpn-evpn-05)
from struct import pack
from struct import unpack
class EthernetTag (object):
MAX = pow(2,32)-1
__slots__ = ['tag']
def __init__ (self, tag=0):
self.tag = tag
def __eq__ (self, other):
return self.tag == other.tag
	def __ne__ (self, other):
return self.tag != other.tag
def __lt__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __le__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __gt__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __ge__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __str__ (self):
return repr(self.tag)
def __repr__ (self):
return repr(self.tag)
def pack (self):
return pack("!L",self.tag)
def __len__ (self):
return 4
def __hash__ (self):
return hash(self.tag)
@classmethod
def unpack (cls, data):
return cls(unpack("!L",data[:4])[0])
def json (self, compact=None):
return '"ethernet-tag": %s' % self.tag
| bsd-3-clause | 3,194,523,706,247,964,000 | 21.174603 | 83 | 0.67287 | false |
fbradyirl/home-assistant | homeassistant/components/smartthings/smartapp.py | 1 | 16136 | """SmartApp functionality to receive cloud-push notifications."""
import asyncio
import functools
import logging
from urllib.parse import urlparse
from uuid import uuid4
from aiohttp import web
from pysmartapp import Dispatcher, SmartAppManager
from pysmartapp.const import SETTINGS_APP_ID
from pysmartthings import (
APP_TYPE_WEBHOOK,
CAPABILITIES,
CLASSIFICATION_AUTOMATION,
App,
AppOAuth,
AppSettings,
InstalledAppStatus,
SmartThings,
SourceType,
Subscription,
SubscriptionEntity,
)
from homeassistant.components import cloud, webhook
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
APP_NAME_PREFIX,
APP_OAUTH_CLIENT_NAME,
APP_OAUTH_SCOPES,
CONF_APP_ID,
CONF_CLOUDHOOK_URL,
CONF_INSTALLED_APP_ID,
CONF_INSTALLED_APPS,
CONF_INSTANCE_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DATA_MANAGER,
DOMAIN,
SETTINGS_INSTANCE_ID,
SIGNAL_SMARTAPP_PREFIX,
STORAGE_KEY,
STORAGE_VERSION,
)
_LOGGER = logging.getLogger(__name__)
async def find_app(hass: HomeAssistantType, api):
"""Find an existing SmartApp for this installation of hass."""
apps = await api.apps()
for app in [app for app in apps if app.app_name.startswith(APP_NAME_PREFIX)]:
# Load settings to compare instance id
settings = await app.settings()
if (
settings.settings.get(SETTINGS_INSTANCE_ID)
== hass.data[DOMAIN][CONF_INSTANCE_ID]
):
return app
async def validate_installed_app(api, installed_app_id: str):
"""
Ensure the specified installed SmartApp is valid and functioning.
Query the API for the installed SmartApp and validate that it is tied to
the specified app_id and is in an authorized state.
"""
installed_app = await api.installed_app(installed_app_id)
if installed_app.installed_app_status != InstalledAppStatus.AUTHORIZED:
raise RuntimeWarning(
"Installed SmartApp instance '{}' ({}) is not "
"AUTHORIZED but instead {}".format(
installed_app.display_name,
installed_app.installed_app_id,
installed_app.installed_app_status,
)
)
return installed_app
def validate_webhook_requirements(hass: HomeAssistantType) -> bool:
"""Ensure HASS is setup properly to receive webhooks."""
if cloud.async_active_subscription(hass):
return True
if hass.data[DOMAIN][CONF_CLOUDHOOK_URL] is not None:
return True
return get_webhook_url(hass).lower().startswith("https://")
def get_webhook_url(hass: HomeAssistantType) -> str:
"""
Get the URL of the webhook.
Return the cloudhook if available, otherwise local webhook.
"""
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if cloud.async_active_subscription(hass) and cloudhook_url is not None:
return cloudhook_url
return webhook.async_generate_url(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
def _get_app_template(hass: HomeAssistantType):
endpoint = "at " + hass.config.api.base_url
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if cloudhook_url is not None:
endpoint = "via Nabu Casa"
description = "{} {}".format(hass.config.location_name, endpoint)
return {
"app_name": APP_NAME_PREFIX + str(uuid4()),
"display_name": "Home Assistant",
"description": description,
"webhook_target_url": get_webhook_url(hass),
"app_type": APP_TYPE_WEBHOOK,
"single_instance": True,
"classifications": [CLASSIFICATION_AUTOMATION],
}
async def create_app(hass: HomeAssistantType, api):
"""Create a SmartApp for this instance of hass."""
# Create app from template attributes
template = _get_app_template(hass)
app = App()
for key, value in template.items():
setattr(app, key, value)
app, client = await api.create_app(app)
_LOGGER.debug("Created SmartApp '%s' (%s)", app.app_name, app.app_id)
# Set unique hass id in settings
settings = AppSettings(app.app_id)
settings.settings[SETTINGS_APP_ID] = app.app_id
settings.settings[SETTINGS_INSTANCE_ID] = hass.data[DOMAIN][CONF_INSTANCE_ID]
await api.update_app_settings(settings)
_LOGGER.debug(
"Updated App Settings for SmartApp '%s' (%s)", app.app_name, app.app_id
)
# Set oauth scopes
oauth = AppOAuth(app.app_id)
oauth.client_name = APP_OAUTH_CLIENT_NAME
oauth.scope.extend(APP_OAUTH_SCOPES)
await api.update_app_oauth(oauth)
_LOGGER.debug("Updated App OAuth for SmartApp '%s' (%s)", app.app_name, app.app_id)
return app, client
async def update_app(hass: HomeAssistantType, app):
"""Ensure the SmartApp is up-to-date and update if necessary."""
template = _get_app_template(hass)
template.pop("app_name") # don't update this
update_required = False
for key, value in template.items():
if getattr(app, key) != value:
update_required = True
setattr(app, key, value)
if update_required:
await app.save()
_LOGGER.debug(
"SmartApp '%s' (%s) updated with latest settings", app.app_name, app.app_id
)
def setup_smartapp(hass, app):
"""
Configure an individual SmartApp in hass.
Register the SmartApp with the SmartAppManager so that hass will service
lifecycle events (install, event, etc...). A unique SmartApp is created
for each SmartThings account that is configured in hass.
"""
manager = hass.data[DOMAIN][DATA_MANAGER]
smartapp = manager.smartapps.get(app.app_id)
if smartapp:
# already setup
return smartapp
smartapp = manager.register(app.app_id, app.webhook_public_key)
smartapp.name = app.display_name
smartapp.description = app.description
smartapp.permissions.extend(APP_OAUTH_SCOPES)
return smartapp
async def setup_smartapp_endpoint(hass: HomeAssistantType):
"""
Configure the SmartApp webhook in hass.
SmartApps are an extension point within the SmartThings ecosystem and
    are used to receive push updates (i.e. device updates) from the cloud.
"""
data = hass.data.get(DOMAIN)
if data:
# already setup
return
# Get/create config to store a unique id for this hass instance.
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
config = await store.async_load()
if not config:
# Create config
config = {
CONF_INSTANCE_ID: str(uuid4()),
CONF_WEBHOOK_ID: webhook.generate_secret(),
CONF_CLOUDHOOK_URL: None,
}
await store.async_save(config)
# Register webhook
webhook.async_register(
hass, DOMAIN, "SmartApp", config[CONF_WEBHOOK_ID], smartapp_webhook
)
# Create webhook if eligible
cloudhook_url = config.get(CONF_CLOUDHOOK_URL)
if (
cloudhook_url is None
and cloud.async_active_subscription(hass)
and not hass.config_entries.async_entries(DOMAIN)
):
cloudhook_url = await cloud.async_create_cloudhook(
hass, config[CONF_WEBHOOK_ID]
)
config[CONF_CLOUDHOOK_URL] = cloudhook_url
await store.async_save(config)
_LOGGER.debug("Created cloudhook '%s'", cloudhook_url)
# SmartAppManager uses a dispatcher to invoke callbacks when push events
# occur. Use hass' implementation instead of the built-in one.
dispatcher = Dispatcher(
signal_prefix=SIGNAL_SMARTAPP_PREFIX,
connect=functools.partial(async_dispatcher_connect, hass),
send=functools.partial(async_dispatcher_send, hass),
)
# Path is used in digital signature validation
path = (
urlparse(cloudhook_url).path
if cloudhook_url
else webhook.async_generate_path(config[CONF_WEBHOOK_ID])
)
manager = SmartAppManager(path, dispatcher=dispatcher)
manager.connect_install(functools.partial(smartapp_install, hass))
manager.connect_update(functools.partial(smartapp_update, hass))
manager.connect_uninstall(functools.partial(smartapp_uninstall, hass))
hass.data[DOMAIN] = {
DATA_MANAGER: manager,
CONF_INSTANCE_ID: config[CONF_INSTANCE_ID],
DATA_BROKERS: {},
CONF_WEBHOOK_ID: config[CONF_WEBHOOK_ID],
# Will not be present if not enabled
CONF_CLOUDHOOK_URL: config.get(CONF_CLOUDHOOK_URL),
CONF_INSTALLED_APPS: [],
}
_LOGGER.debug(
"Setup endpoint for %s",
cloudhook_url
if cloudhook_url
else webhook.async_generate_url(hass, config[CONF_WEBHOOK_ID]),
)
async def unload_smartapp_endpoint(hass: HomeAssistantType):
"""Tear down the component configuration."""
if DOMAIN not in hass.data:
return
# Remove the cloudhook if it was created
cloudhook_url = hass.data[DOMAIN][CONF_CLOUDHOOK_URL]
if cloudhook_url and cloud.async_is_logged_in(hass):
await cloud.async_delete_cloudhook(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
# Remove cloudhook from storage
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
await store.async_save(
{
CONF_INSTANCE_ID: hass.data[DOMAIN][CONF_INSTANCE_ID],
CONF_WEBHOOK_ID: hass.data[DOMAIN][CONF_WEBHOOK_ID],
CONF_CLOUDHOOK_URL: None,
}
)
_LOGGER.debug("Cloudhook '%s' was removed", cloudhook_url)
# Remove the webhook
webhook.async_unregister(hass, hass.data[DOMAIN][CONF_WEBHOOK_ID])
# Disconnect all brokers
for broker in hass.data[DOMAIN][DATA_BROKERS].values():
broker.disconnect()
# Remove all handlers from manager
hass.data[DOMAIN][DATA_MANAGER].dispatcher.disconnect_all()
# Remove the component data
hass.data.pop(DOMAIN)
async def smartapp_sync_subscriptions(
hass: HomeAssistantType,
auth_token: str,
location_id: str,
installed_app_id: str,
devices,
):
"""Synchronize subscriptions of an installed up."""
api = SmartThings(async_get_clientsession(hass), auth_token)
tasks = []
async def create_subscription(target: str):
sub = Subscription()
sub.installed_app_id = installed_app_id
sub.location_id = location_id
sub.source_type = SourceType.CAPABILITY
sub.capability = target
try:
await api.create_subscription(sub)
_LOGGER.debug(
"Created subscription for '%s' under app '%s'", target, installed_app_id
)
except Exception as error: # pylint:disable=broad-except
_LOGGER.error(
"Failed to create subscription for '%s' under app " "'%s': %s",
target,
installed_app_id,
error,
)
async def delete_subscription(sub: SubscriptionEntity):
try:
await api.delete_subscription(installed_app_id, sub.subscription_id)
_LOGGER.debug(
"Removed subscription for '%s' under app '%s' "
"because it was no longer needed",
sub.capability,
installed_app_id,
)
except Exception as error: # pylint:disable=broad-except
_LOGGER.error(
"Failed to remove subscription for '%s' under app " "'%s': %s",
sub.capability,
installed_app_id,
error,
)
# Build set of capabilities and prune unsupported ones
capabilities = set()
for device in devices:
capabilities.update(device.capabilities)
capabilities.intersection_update(CAPABILITIES)
# Get current subscriptions and find differences
subscriptions = await api.subscriptions(installed_app_id)
for subscription in subscriptions:
if subscription.capability in capabilities:
capabilities.remove(subscription.capability)
else:
# Delete the subscription
tasks.append(delete_subscription(subscription))
# Remaining capabilities need subscriptions created
tasks.extend([create_subscription(c) for c in capabilities])
if tasks:
await asyncio.gather(*tasks)
else:
_LOGGER.debug("Subscriptions for app '%s' are up-to-date", installed_app_id)
async def smartapp_install(hass: HomeAssistantType, req, resp, app):
"""
Handle when a SmartApp is installed by the user into a location.
Create a config entry representing the installation if this is not
the first installation under the account, otherwise store the data
for the config flow.
"""
install_data = {
CONF_INSTALLED_APP_ID: req.installed_app_id,
CONF_LOCATION_ID: req.location_id,
CONF_REFRESH_TOKEN: req.refresh_token,
}
# App attributes (client id/secret, etc...) are copied from another entry
# with the same parent app_id. If one is not found, the install data is
# stored for the config flow to retrieve during the wait step.
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data[CONF_APP_ID] == app.app_id
),
None,
)
if entry:
data = entry.data.copy()
data.update(install_data)
        # Adding as a job is not needed because the current coroutine was
        # invoked from the dispatcher and is not being awaited.
await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "install"}, data=data
)
else:
# Store the data where the flow can find it
hass.data[DOMAIN][CONF_INSTALLED_APPS].append(install_data)
_LOGGER.debug(
"Installed SmartApp '%s' under parent app '%s'",
req.installed_app_id,
app.app_id,
)
async def smartapp_update(hass: HomeAssistantType, req, resp, app):
"""
Handle when a SmartApp is updated (reconfigured) by the user.
Store the refresh token in the config entry.
"""
# Update refresh token in config entry
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
),
None,
)
if entry:
entry.data[CONF_REFRESH_TOKEN] = req.refresh_token
hass.config_entries.async_update_entry(entry)
_LOGGER.debug(
"Updated SmartApp '%s' under parent app '%s'", req.installed_app_id, app.app_id
)
async def smartapp_uninstall(hass: HomeAssistantType, req, resp, app):
"""
Handle when a SmartApp is removed from a location by the user.
Find and delete the config entry representing the integration.
"""
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_INSTALLED_APP_ID) == req.installed_app_id
),
None,
)
if entry:
        # Adding as a job is not needed because the current coroutine was
        # invoked from the dispatcher and is not being awaited.
await hass.config_entries.async_remove(entry.entry_id)
_LOGGER.debug(
"Uninstalled SmartApp '%s' under parent app '%s'",
req.installed_app_id,
app.app_id,
)
async def smartapp_webhook(hass: HomeAssistantType, webhook_id: str, request):
"""
Handle a smartapp lifecycle event callback from SmartThings.
Requests from SmartThings are digitally signed and the SmartAppManager
validates the signature for authenticity.
"""
manager = hass.data[DOMAIN][DATA_MANAGER]
data = await request.json()
result = await manager.handle_request(data, request.headers)
return web.json_response(result)
| apache-2.0 | -1,182,386,507,876,220,700 | 33.042194 | 88 | 0.649417 | false |
kimberlythegeek/axe-selenium-python | axe_selenium_python/tests/conftest.py | 1 | 1081 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import datetime
import pytest
from py.xml import html
@pytest.mark.optionalhook
def pytest_html_results_table_header(cells):
"""Add description and sortable time header to HTML report."""
cells.insert(2, html.th("Description"))
cells.insert(0, html.th("Time", class_="sortable time", col="time"))
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
"""Add description and sortable time column to HTML report."""
cells.insert(2, html.td(report.description))
cells.insert(1, html.td(datetime.utcnow(), class_="col-time"))
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
"""Make HTML report using test-function docstrings as description."""
outcome = yield
report = outcome.get_result()
# add docstring to 'description' column
report.description = str(item.function.__doc__)
| mpl-2.0 | -323,944,129,504,433,200 | 33.870968 | 73 | 0.719704 | false |
deprecated/nebulio | nebulio/tests/test_synphot.py | 1 | 2699 | """These tests compare filter parameters from pysynphot with the ones
I have calculated myself."""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import os
import pytest
import nebulio
from nebulio.tests.utils import this_func_name
from nebulio.legacy import wfc3_utils
from matplotlib import pyplot as plt
wfc3_filters_to_test = [
"F658N", "F656N", "F673N", "F547M",
"FQ575N", "FQ672N", "FQ674N"
]
multiplets_to_test = [
"[N II] 6583", "[O III] 5007"
]
def plot_compare_bandpass(bp0, bp, fn):
fig, ax = plt.subplots()
ax.plot(bp0.wave, bp0.T, '-', label='legacy')
ax.plot(bp.wave, bp.T, '-', label='synphot')
ax.set_xlim(bp.wav0 - bp.Wj, bp.wav0 + bp.Wj)
ax.set_title(fn)
ax.legend()
plotfile = os.path.join("plots", '{}-{}.pdf'.format(this_func_name(), fn))
fig.savefig(plotfile)
class LegacyBandpass(object):
"""Lightweight OO wrapper around `wfc3_utils` bandpass"""
pass
@pytest.fixture(scope="module", params=wfc3_filters_to_test)
def bandpass_by_both_methods(request):
"""Fixture to read in both the legacy and pysynphot bandpasses"""
fn = request.param
bp0 = LegacyBandpass()
bp0.wave, bp0.T = wfc3_utils.get_filter(fn, return_wavelength=True)
bp0.Tm = wfc3_utils.Tm(bp0.T)
bp = nebulio.Bandpass(','.join(['wfc3', 'uvis1', fn]))
plot_compare_bandpass(bp0, bp, fn)
return (bp0, bp, fn)
@pytest.fixture(scope="module", params=wfc3_filters_to_test)
def wfc3_bandpass(request):
"""Fixture to read in the pysynphot bandpass for a WFC3 filter"""
return nebulio.Bandpass(','.join(['wfc3', 'uvis1', request.param]))
@pytest.fixture(scope="module", params=multiplets_to_test)
def emission_line_multiplet(request):
lineid = request.param
return nebulio.EmissionLine(lineid, velocity=30.0, fwhm_kms=20.0)
def test_version():
"""Silly test just to test that tests work"""
assert nebulio.__version__ == "0.1a1", nebulio.__version__
def test_wfc3_utils(bandpass_by_both_methods):
"""Compare results from `nebulio.filterset` with results from wfc3_utils"""
bp0, bp, fn = bandpass_by_both_methods
allowed_tolerance = 0.015
assert abs(bp0.Tm - bp.Tm) < allowed_tolerance, "Tm({}) = {}, {}".format(fn, bp0.Tm, bp.Tm)
def test_multiplet(emission_line_multiplet):
em = emission_line_multiplet
assert em.multiplicity == 3
def test_gaussian_multiplet(emission_line_multiplet, wfc3_bandpass):
"""This is testing that we can find the transmission at the line
wavelength for each member of the multiplet
"""
em = emission_line_multiplet
bp = wfc3_bandpass
Ti = bp.Ti(em)
assert len(Ti) == 3
| mit | -8,268,557,955,996,119,000 | 30.752941 | 95 | 0.675806 | false |
ayepezv/GAD_ERP | openerp/report/render/rml2pdf/trml2pdf.py | 1 | 45686 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import sys
import copy
import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import utils
import color
import os
import logging
import traceback
from lxml import etree
import base64
from distutils.version import LooseVersion
from reportlab.platypus.doctemplate import ActionFlowable
from openerp.tools.safe_eval import safe_eval
from reportlab.lib.units import inch,cm,mm
from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
try:
from customfonts import SetCustomFonts
except ImportError:
SetCustomFonts=lambda x:None
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def select_fontname(fontname, default_fontname):
if fontname not in pdfmetrics.getRegisteredFontNames()\
or fontname not in pdfmetrics.standardFonts:
# let reportlab attempt to find it
try:
pdfmetrics.getFont(fontname)
except Exception:
addition = ""
if " " in fontname:
addition = ". Your font contains spaces which is not valid in RML."
_logger.warning('Could not locate font %s, substituting default: %s%s',
fontname, default_fontname, addition)
fontname = default_fontname
return fontname
def _open_image(filename, path=None):
"""Attempt to open a binary file and return the descriptor
"""
if os.path.isfile(filename):
return open(filename, 'rb')
for p in (path or []):
if p and os.path.isabs(p):
fullpath = os.path.join(p, filename)
if os.path.isfile(fullpath):
return open(fullpath, 'rb')
try:
if p:
fullpath = os.path.join(p, filename)
else:
fullpath = filename
return file_open(fullpath)
except IOError:
pass
raise IOError("File %s cannot be found in image path" % filename)
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
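        # Defer the real canvas.Canvas.showPage(): page state is captured by
        # TinyDocTemplate.afterPage() and the pages are only flushed in save(),
        # once the total page count is known.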
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number()
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self):
page_count = len(self._saved_page_states)
self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
" %(this)i / %(total)i" % {
'this': self._pageNumber,
'total': page_count,
}
)
class PageCount(platypus.Flowable):
def __init__(self, story_count=0):
platypus.Flowable.__init__(self)
self.story_count = story_count
def draw(self):
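        # Draw the page number inside a named PDF form ('pageCount<N>');
        # _rml_canvas resolves <pageCount/> tags through canv.doForm() on the
        # same name, so every page of story <N> can display its total page count.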
self.canv.beginForm("pageCount%d" % self.story_count)
self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm()
class PageReset(platypus.Flowable):
def draw(self):
"""Flag to close current story page numbering and prepare for the next
should be executed after the rendering of the full story"""
self.canv._doPageReset = True
class _rml_styles(object,):
def __init__(self, nodes, localcontext):
self.localcontext = localcontext
self.styles = {}
self.styles_obj = {}
self.names = {}
self.table_styles = {}
self.default_style = reportlab.lib.styles.getSampleStyleSheet()
for node in nodes:
for style in node.findall('blockTableStyle'):
self.table_styles[style.get('id')] = self._table_style_get(style)
for style in node.findall('paraStyle'):
sname = style.get('name')
self.styles[sname] = self._para_style_update(style)
if self.default_style.has_key(sname):
for key, value in self.styles[sname].items():
setattr(self.default_style[sname], key, value)
else:
self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname])
for variable in node.findall('initialize'):
for name in variable.findall('name'):
self.names[ name.get('id')] = name.get('value')
def _para_style_update(self, node):
data = {}
for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']:
if node.get(attr):
data[attr] = color.get(node.get(attr))
for attr in ['bulletFontName', 'fontName']:
if node.get(attr):
fontname= select_fontname(node.get(attr), None)
if fontname is not None:
data['fontName'] = fontname
for attr in ['bulletText']:
if node.get(attr):
data[attr] = node.get(attr)
for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter',
'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading',
'borderWidth','borderPadding','borderRadius']:
if node.get(attr):
data[attr] = utils.unit_get(node.get(attr))
if node.get('alignment'):
align = {
'right':reportlab.lib.enums.TA_RIGHT,
'center':reportlab.lib.enums.TA_CENTER,
'justify':reportlab.lib.enums.TA_JUSTIFY
}
data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT)
data['splitLongWords'] = 0
return data
def _table_style_get(self, style_node):
styles = []
for node in style_node:
start = utils.tuple_int_get(node, 'start', (0,0) )
stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
if node.tag=='blockValign':
styles.append(('VALIGN', start, stop, str(node.get('value'))))
elif node.tag=='blockFont':
styles.append(('FONT', start, stop, str(node.get('name'))))
elif node.tag=='blockTextColor':
styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName')))))
elif node.tag=='blockLeading':
styles.append(('LEADING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockAlignment':
styles.append(('ALIGNMENT', start, stop, str(node.get('value'))))
elif node.tag=='blockSpan':
styles.append(('SPAN', start, stop))
elif node.tag=='blockLeftPadding':
styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockRightPadding':
styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockTopPadding':
styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBottomPadding':
styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBackground':
styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName'))))
if node.get('size'):
styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size'))))
elif node.tag=='lineStyle':
kind = node.get('kind')
kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ]
assert kind in kind_list
thick = 1
if node.get('thickness'):
thick = float(node.get('thickness'))
styles.append((kind, start, stop, thick, color.get(node.get('colorName'))))
return platypus.tables.TableStyle(styles)
def para_style_get(self, node):
style = False
sname = node.get('style')
if sname:
if sname in self.styles_obj:
style = self.styles_obj[sname]
else:
_logger.debug('Warning: style not found, %s - setting default!', node.get('style'))
if not style:
style = self.default_style['Normal']
para_update = self._para_style_update(node)
if para_update:
            # update style only if necessary
style = copy.deepcopy(style)
style.__dict__.update(para_update)
return style
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
if images is None:
images = {}
if localcontext is None:
localcontext = {}
self.localcontext = localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.images = images
self.path = path
self.title = title
def docinit(self, els):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
for node in els:
for font in node.findall('registerFont'):
name = font.get('fontName').encode('ascii')
fname = font.get('fontFile').encode('ascii')
if name not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(name, fname))
#by default, we map the fontName to each style (bold, italic, bold and italic), so that
                #if there isn't any font defined for one of these styles (via a font family), the system
                #will fall back on the normal font.
addMapping(name, 0, 0, name) #normal
addMapping(name, 0, 1, name) #italic
addMapping(name, 1, 0, name) #bold
addMapping(name, 1, 1, name) #italic and bold
#if registerFontFamily is defined, we register the mapping of the fontName to use for each style.
for font_family in node.findall('registerFontFamily'):
family_name = font_family.get('normal').encode('ascii')
if font_family.get('italic'):
addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii'))
if font_family.get('bold'):
addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii'))
if font_family.get('boldItalic'):
addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii'))
def setTTFontMapping(self,face, fontname, filename, mode='all'):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
if mode:
mode = mode.lower()
if fontname not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(fontname, filename))
if mode == 'all':
addMapping(face, 0, 0, fontname) #normal
addMapping(face, 0, 1, fontname) #italic
addMapping(face, 1, 0, fontname) #bold
addMapping(face, 1, 1, fontname) #italic and bold
elif mode in ['italic', 'oblique']:
addMapping(face, 0, 1, fontname) #italic
elif mode == 'bold':
addMapping(face, 1, 0, fontname) #bold
elif mode in ('bolditalic', 'bold italic','boldoblique', 'bold oblique'):
addMapping(face, 1, 1, fontname) #italic and bold
else:
addMapping(face, 0, 0, fontname) #normal
def _textual_image(self, node):
rc = ''
for n in node:
rc +=( etree.tostring(n) or '') + n.tail
return base64.decodestring(node.tostring())
def _images(self, el):
result = {}
for node in el.findall('.//image'):
rc =( node.text or '')
result[node.get('name')] = base64.decodestring(rc)
return result
def render(self, out):
el = self.etree.findall('.//docinit')
if el:
self.docinit(el)
el = self.etree.findall('.//stylesheet')
self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('.//images')
if el:
self.images.update( self._images(el[0]) )
el = self.etree.findall('.//template')
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title)
el = utils._child_get(self.etree, self, 'story')
pt_obj.render(el)
else:
self.canvas = canvas.Canvas(out)
pd = self.etree.find('pageDrawing')[0]
pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title)
pd_obj.render(pd)
self.canvas.showPage()
self.canvas.save()
class _rml_canvas(object):
def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.canvas = canvas
self.styles = doc.styles
self.doc_tmpl = doc_tmpl
self.doc = doc
self.images = images
self.path = path
self.title = title
if self.title:
self.canvas.setTitle(self.title)
def _textual(self, node, x=0, y=0):
text = node.text and node.text.encode('utf-8') or ''
rc = utils._process_text(self, text)
for n in node:
if n.tag == 'seq':
from reportlab.lib.sequencer import getSequencer
seq = getSequencer()
rc += str(seq.next(n.get('id')))
if n.tag == 'pageCount':
if x or y:
self.canvas.translate(x,y)
self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
if x or y:
self.canvas.translate(-x,-y)
if n.tag == 'pageNumber':
rc += str(self.canvas.getPageNumber())
rc += utils._process_text(self, n.tail)
return rc.replace('\n','')
def _drawString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
try:
self.canvas.drawString(text=text, **v)
except TypeError:
_logger.info("Bad RML: <drawString> tag requires attributes 'x' and 'y'!")
raise
def _drawCenteredString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawCentredString(text=text, **v)
def _drawRightString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawRightString(text=text, **v)
def _rect(self, node):
if node.get('round'):
self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
else:
self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
def _ellipse(self, node):
x1 = utils.unit_get(node.get('x'))
x2 = utils.unit_get(node.get('width'))
y1 = utils.unit_get(node.get('y'))
y2 = utils.unit_get(node.get('height'))
self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _curves(self, node):
line_str = node.text.split()
lines = []
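        # each segment of a <curves> element is described by 8 coordinates:
        # start point, two control points and end point of a bezier curve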
while len(line_str)>7:
self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]])
line_str = line_str[8:]
def _lines(self, node):
line_str = node.text.split()
lines = []
while len(line_str)>3:
lines.append([utils.unit_get(l) for l in line_str[0:4]])
line_str = line_str[4:]
self.canvas.lines(lines)
def _grid(self, node):
xlist = [utils.unit_get(s) for s in node.get('xs').split(',')]
ylist = [utils.unit_get(s) for s in node.get('ys').split(',')]
self.canvas.grid(xlist, ylist)
def _translate(self, node):
dx = utils.unit_get(node.get('dx')) or 0
dy = utils.unit_get(node.get('dy')) or 0
self.canvas.translate(dx,dy)
def _circle(self, node):
self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _place(self, node):
flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node)
infos = utils.attr_get(node, ['x','y','width','height'])
infos['y']+=infos['height']
for flow in flows:
w,h = flow.wrap(infos['width'], infos['height'])
if w<=infos['width'] and h<=infos['height']:
infos['y']-=h
flow.drawOn(self.canvas,infos['x'],infos['y'])
infos['height']-=h
else:
raise ValueError("Not enough space")
def _line_mode(self, node):
ljoin = {'round':1, 'mitered':0, 'bevelled':2}
lcap = {'default':0, 'round':1, 'square':2}
if node.get('width'):
self.canvas.setLineWidth(utils.unit_get(node.get('width')))
if node.get('join'):
self.canvas.setLineJoin(ljoin[node.get('join')])
if node.get('cap'):
self.canvas.setLineCap(lcap[node.get('cap')])
if node.get('miterLimit'):
self.canvas.setDash(utils.unit_get(node.get('miterLimit')))
if node.get('dash'):
dashes = node.get('dash').split(',')
for x in range(len(dashes)):
dashes[x]=utils.unit_get(dashes[x])
self.canvas.setDash(node.get('dash').split(','))
def _image(self, node):
import urllib
import urlparse
from reportlab.lib.utils import ImageReader
nfile = node.get('file')
if not nfile:
if node.get('name'):
image_data = self.images[node.get('name')]
_logger.debug("Image %s used", node.get('name'))
s = StringIO(image_data)
else:
newtext = node.text
if self.localcontext:
res = utils._regex.findall(newtext)
for key in res:
newtext = safe_eval(key, {}, self.localcontext) or ''
image_data = None
if newtext:
image_data = base64.decodestring(newtext)
if image_data:
s = StringIO(image_data)
else:
_logger.debug("No image data!")
return False
else:
if nfile in self.images:
s = StringIO(self.images[nfile])
else:
try:
up = urlparse.urlparse(str(nfile))
except ValueError:
up = False
if up and up.scheme:
# RFC: do we really want to open external URLs?
# Are we safe from cross-site scripting or attacks?
_logger.debug("Retrieve image from %s", nfile)
u = urllib.urlopen(str(nfile))
s = StringIO(u.read())
else:
_logger.debug("Open image file %s ", nfile)
s = _open_image(nfile, path=self.path)
try:
img = ImageReader(s)
(sx,sy) = img.getSize()
_logger.debug("Image is %dx%d", sx, sy)
args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'}
for tag in ('width','height','x','y'):
if node.get(tag):
args[tag] = utils.unit_get(node.get(tag))
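            # preserve the aspect ratio: derive the missing dimension, or scale
            # the requested box down so the image fits while keeping its ratio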
if ('width' in args) and (not 'height' in args):
args['height'] = sy * args['width'] / sx
elif ('height' in args) and (not 'width' in args):
args['width'] = sx * args['height'] / sy
elif ('width' in args) and ('height' in args):
                if (float(args['width'])/args['height'])>(float(sx)/sy):
args['width'] = sx * args['height'] / sy
else:
args['height'] = sy * args['width'] / sx
self.canvas.drawImage(img, **args)
finally:
s.close()
# self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas)
def _path(self, node):
self.path = self.canvas.beginPath()
self.path.moveTo(**utils.attr_get(node, ['x','y']))
for n in utils._child_get(node, self):
if not n.text :
if n.tag=='moveto':
vals = utils.text_get(n).split()
self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1]))
elif n.tag=='curvesto':
vals = utils.text_get(n).split()
while len(vals)>5:
pos=[]
while len(pos)<6:
pos.append(utils.unit_get(vals.pop(0)))
self.path.curveTo(*pos)
elif n.text:
data = n.text.split() # Not sure if I must merge all TEXT_NODE ?
while len(data)>1:
x = utils.unit_get(data.pop(0))
y = utils.unit_get(data.pop(0))
self.path.lineTo(x,y)
if (not node.get('close')) or utils.bool_get(node.get('close')):
self.path.close()
self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def setFont(self, node):
fontname = select_fontname(node.get('name'), self.canvas._fontname)
return self.canvas.setFont(fontname, utils.unit_get(node.get('size')))
def render(self, node):
tags = {
'drawCentredString': self._drawCenteredString,
'drawRightString': self._drawRightString,
'drawString': self._drawString,
'rect': self._rect,
'ellipse': self._ellipse,
'lines': self._lines,
'grid': self._grid,
'curves': self._curves,
'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))),
'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))),
'setFont': self.setFont ,
'place': self._place,
'circle': self._circle,
'lineMode': self._line_mode,
'path': self._path,
'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))),
'translate': self._translate,
'image': self._image
}
for n in utils._child_get(node, self):
if n.tag in tags:
tags[n.tag](n)
class _rml_draw(object):
def __init__(self, localcontext, node, styles, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.node = node
self.styles = styles
self.canvas = None
self.images = images
self.path = path
self.canvas_title = title
def render(self, canvas, doc):
canvas.saveState()
cnv = _rml_canvas(canvas, self.localcontext, doc, self.styles, images=self.images, path=self.path, title=self.canvas_title)
cnv.render(self.node)
canvas.restoreState()
class _rml_Illustration(platypus.flowables.Flowable):
def __init__(self, node, localcontext, styles, self2):
self.localcontext = (localcontext or {}).copy()
self.node = node
self.styles = styles
self.width = utils.unit_get(node.get('width'))
self.height = utils.unit_get(node.get('height'))
self.self2 = self2
def wrap(self, *args):
return self.width, self.height
def draw(self):
drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title)
drw.render(self.canv, None)
# Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting
original_pto_split = platypus.flowables.PTOContainer.split
def split(self, availWidth, availHeight):
res = original_pto_split(self, availWidth, availHeight)
if len(res) > 2 and len(self._content) > 0:
header = self._content[0]._ptoinfo.header
trailer = self._content[0]._ptoinfo.trailer
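        # if the only flowables that fit are the repeated PTO header/trailer,
        # report that nothing fits so platypus does not keep producing empty pages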
if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]):
return []
return res
platypus.flowables.PTOContainer.split = split
class _rml_flowable(object):
def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None):
if images is None:
images = {}
self.localcontext = localcontext
self.doc = doc
self.styles = doc.styles
self.images = images
self.path = path
self.title = title
self.canvas = canvas
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
for n in utils._child_get(node,self):
txt_n = copy.deepcopy(n)
for key in txt_n.attrib.keys():
if key in ('rml_except', 'rml_loop', 'rml_tag'):
del txt_n.attrib[key]
if not n.tag == 'bullet':
if n.tag == 'pageNumber':
txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or ''
else:
txt_n.text = utils.xml2str(self._textual(n))
txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or ''
rc1 += etree.tostring(txt_n)
return rc1
def _table(self, node):
children = utils._child_get(node,self,'tr')
if not children:
return None
length = 0
colwidths = None
rowheights = None
data = []
styles = []
posy = 0
for tr in children:
paraStyle = None
if tr.get('style'):
st = copy.deepcopy(self.styles.table_styles[tr.get('style')])
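                # re-anchor the copied style commands on the current row (posy)
                # so the row-level style applies to the right line of the table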
for si in range(len(st._cmds)):
s = list(st._cmds[si])
s[1] = (s[1][0],posy)
s[2] = (s[2][0],posy)
st._cmds[si] = tuple(s)
styles.append(st)
if tr.get('paraStyle'):
paraStyle = self.styles.styles[tr.get('paraStyle')]
data2 = []
posx = 0
for td in utils._child_get(tr, self,'td'):
if td.get('style'):
st = copy.deepcopy(self.styles.table_styles[td.get('style')])
for s in st._cmds:
s[1][1] = posy
s[2][1] = posy
s[1][0] = posx
s[2][0] = posx
styles.append(st)
if td.get('paraStyle'):
# TODO: merge styles
paraStyle = self.styles.styles[td.get('paraStyle')]
posx += 1
flow = []
for n in utils._child_get(td, self):
if n.tag == etree.Comment:
n.text = ''
continue
fl = self._flowable(n, extra_style=paraStyle)
if isinstance(fl,list):
flow += fl
else:
flow.append( fl )
if not len(flow):
flow = self._textual(td)
data2.append( flow )
if len(data2)>length:
length=len(data2)
for ab in data:
while len(ab)<length:
ab.append('')
while len(data2)<length:
data2.append('')
data.append( data2 )
posy += 1
if node.get('colWidths'):
assert length == len(node.get('colWidths').split(','))
colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')]
if node.get('rowHeights'):
rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')]
if len(rowheights) == 1:
rowheights = rowheights[0]
table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'})))
if node.get('style'):
table.setStyle(self.styles.table_styles[node.get('style')])
for s in styles:
table.setStyle(s)
return table
def _illustration(self, node):
return _rml_Illustration(node, self.localcontext, self.styles, self)
def _textual_image(self, node):
return base64.decodestring(node.text)
def _pto(self, node):
sub_story = []
pto_header = None
pto_trailer = None
for node in utils._child_get(node, self):
if node.tag == etree.Comment:
node.text = ''
continue
elif node.tag=='pto_header':
pto_header = self.render(node)
elif node.tag=='pto_trailer':
pto_trailer = self.render(node)
else:
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header)
def _flowable(self, node, extra_style=None):
if node.tag=='pto':
return self._pto(node)
if node.tag=='para':
style = self.styles.para_style_get(node)
if extra_style:
style.__dict__.update(extra_style)
text_node = self._textual(node).strip().replace('\n\n', '\n').replace('\n', '<br/>')
instance = platypus.Paragraph(text_node, style, **(utils.attr_get(node, [], {'bulletText':'str'})))
result = [instance]
if LooseVersion(reportlab.Version) > LooseVersion('3.0') and not instance.getPlainText().strip() and instance.text.strip():
result.append(platypus.Paragraph(' <br/>', style, **(utils.attr_get(node, [], {'bulletText': 'str'}))))
return result
elif node.tag=='barCode':
try:
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import code39
from reportlab.graphics.barcode import code93
from reportlab.graphics.barcode import common
from reportlab.graphics.barcode import fourstate
from reportlab.graphics.barcode import usps
from reportlab.graphics.barcode import createBarcodeDrawing
except ImportError:
_logger.warning("Cannot use barcode renderers:", exc_info=True)
return None
args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
codes = {
'codabar': lambda x: common.Codabar(x, **args),
'code11': lambda x: common.Code11(x, **args),
'code128': lambda x: code128.Code128(str(x), **args),
'standard39': lambda x: code39.Standard39(str(x), **args),
'standard93': lambda x: code93.Standard93(str(x), **args),
'i2of5': lambda x: common.I2of5(x, **args),
'extended39': lambda x: code39.Extended39(str(x), **args),
'extended93': lambda x: code93.Extended93(str(x), **args),
'msi': lambda x: common.MSI(x, **args),
'fim': lambda x: usps.FIM(x, **args),
'postnet': lambda x: usps.POSTNET(x, **args),
'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args),
'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args),
}
code = 'code128'
if node.get('code'):
code = node.get('code').lower()
return codes[code](self._textual(node))
elif node.tag=='name':
self.styles.names[ node.get('id')] = node.get('value')
return None
elif node.tag=='xpre':
style = self.styles.para_style_get(node)
return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'})))
elif node.tag=='pre':
style = self.styles.para_style_get(node)
return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'})))
elif node.tag=='illustration':
return self._illustration(node)
elif node.tag=='blockTable':
return self._table(node)
elif node.tag=='title':
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Title']
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')):
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Heading'+str(node.tag[1:])]
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif node.tag=='image':
image_data = False
if not node.get('file'):
if node.get('name'):
if node.get('name') in self.doc.images:
_logger.debug("Image %s read ", node.get('name'))
image_data = self.doc.images[node.get('name')].read()
else:
_logger.warning("Image %s not defined", node.get('name'))
return False
else:
import base64
newtext = node.text
if self.localcontext:
newtext = utils._process_text(self, node.text or '')
image_data = base64.decodestring(newtext)
if not image_data:
_logger.debug("No inline image data")
return False
image = StringIO(image_data)
else:
_logger.debug("Image get from file %s", node.get('file'))
image = _open_image(node.get('file'), path=self.doc.path)
return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
elif node.tag=='spacer':
if node.get('width'):
width = utils.unit_get(node.get('width'))
else:
width = utils.unit_get('1cm')
length = utils.unit_get(node.get('length'))
return platypus.Spacer(width=width, height=length)
elif node.tag=='section':
return self.render(node)
elif node.tag == 'pageNumberReset':
return PageReset()
elif node.tag in ('pageBreak', 'nextPage'):
return platypus.PageBreak()
elif node.tag=='condPageBreak':
return platypus.CondPageBreak(**(utils.attr_get(node, ['height'])))
elif node.tag=='setNextTemplate':
return platypus.NextPageTemplate(str(node.get('name')))
elif node.tag=='nextFrame':
return platypus.CondPageBreak(1000) # TODO: change the 1000 !
elif node.tag == 'setNextFrame':
from reportlab.platypus.doctemplate import NextFrameFlowable
return NextFrameFlowable(str(node.get('name')))
elif node.tag == 'currentFrame':
from reportlab.platypus.doctemplate import CurrentFrameFlowable
return CurrentFrameFlowable(str(node.get('name')))
elif node.tag == 'frameEnd':
return EndFrameFlowable()
elif node.tag == 'hr':
width_hr=node.get('width') or '100%'
color_hr=node.get('color') or 'black'
thickness_hr=node.get('thickness') or 1
lineCap_hr=node.get('lineCap') or 'round'
return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr))
else:
sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,))
return None
def render(self, node_story):
def process_story(node_story):
sub_story = []
for node in utils._child_get(node_story, self):
if node.tag == etree.Comment:
node.text = ''
continue
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return sub_story
return process_story(node_story)
class EndFrameFlowable(ActionFlowable):
def __init__(self,resume=0):
ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
def beforeDocument(self):
        # Store some useful values directly on the canvas, so they are
        # available while flowables are drawn (needed for proper PageCount
        # handling).
self.canv._doPageReset = False
self.canv._storyCount = 0
def ___handle_pageBegin(self):
self.page += 1
self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self)
for f in self.pageTemplate.frames: f._reset()
self.beforePage()
self._curPageFlowableCount = 0
if hasattr(self,'_nextFrameIndex'):
del self._nextFrameIndex
for f in self.pageTemplate.frames:
if f.id == 'first':
self.frame = f
break
self.handle_frameBegin()
def afterPage(self):
if isinstance(self.canv, NumberedCanvas):
# save current page states before eventual reset
self.canv._saved_page_states.append(dict(self.canv.__dict__))
if self.canv._doPageReset:
            # Following a <pageReset/> tag:
            # - we reset the page number to 0
            # - we add a new PageCount flowable (relative to the current
            #   story number), but not for NumberedCanvas, as it handles the
            #   page count itself.
            # NOTE: the _rml_template render() method adds a PageReset flowable
            # at the end of each story, so we're sure to pass here at least
            # once per story.
if not isinstance(self.canv, NumberedCanvas):
self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
self.canv._pageCount = self.page
self.page = 0
self.canv._flag = True
self.canv._pageNumber = 0
self.canv._doPageReset = False
self.canv._storyCount += 1
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
if images is None:
images = {}
if not localcontext:
localcontext={'internal_header':True}
self.localcontext = localcontext
self.images= images
self.path = path
self.title = title
pagesize_map = {'a4': A4,
'us_letter': letter
}
pageSize = A4
if self.localcontext.get('company'):
pageSize = pagesize_map.get(self.localcontext.get('company').rml_paper_format, A4)
if node.get('pageSize'):
ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(','))
pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) )
self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'}))
self.page_templates = []
self.styles = doc.styles
self.doc = doc
self.image=[]
pts = node.findall('pageTemplate')
for pt in pts:
frames = []
for frame_el in pt.findall('frame'):
frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) )
if utils.attr_get(frame_el, ['last']):
frame.lastFrame = True
frames.append( frame )
try :
gr = pt.findall('pageGraphics')\
or pt[1].findall('pageGraphics')
except Exception: # FIXME: be even more specific, perhaps?
gr=''
if len(gr):
# self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext]
drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
else:
drw = _rml_draw(self.localcontext,node,self.doc,title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
self.doc_tmpl.addPageTemplates(self.page_templates)
def render(self, node_stories):
        if self.localcontext and not self.localcontext.get('internal_header', False):
            # use pop() so a missing 'internal_header' key cannot raise KeyError
            self.localcontext.pop('internal_header', None)
fis = []
r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None)
story_cnt = 0
for node_story in node_stories:
if story_cnt > 0:
fis.append(platypus.PageBreak())
fis += r.render(node_story)
            # mark the end of this story so page numbering can be reset
            # (handled in TinyDocTemplate.afterPage)
fis.append(PageReset())
story_cnt += 1
try:
if self.localcontext and self.localcontext.get('internal_header',False):
self.doc_tmpl.afterFlowable(fis)
self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
else:
self.doc_tmpl.build(fis)
except platypus.doctemplate.LayoutError, e:
e.name = 'Print Error'
e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.'
raise
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
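    """Render an RML string and return the PDF data as a string (fout is ignored)."""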
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
SetCustomFonts(r)
except Exception, exc:
_logger.info('Cannot set font mapping: %s', "".join(traceback.format_exception_only(type(exc),exc)))
fp = StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
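    """Render an RML string to PDF; write to fout and return its path if given, otherwise return the PDF data."""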
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
SetCustomFonts(r)
except Exception:
pass
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO()
r.render(fp)
return fp.getvalue()
def trml2pdf_help():
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Render the standard input (RML) and output a PDF file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
print parseString(file(sys.argv[1], 'r').read()),
else:
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Try \'trml2pdf --help\' for more information.'
| gpl-3.0 | -7,056,118,041,523,852,000 | 41.777154 | 238 | 0.545506 | false |
MediaKraken/MediaKraken_Deployment | source/test_asyncpg_json.py | 1 | 1574 | import asyncio
import datetime
import json
import asyncpg
###
# Fully working example: accessing a row like a dict, with automatic JSON
# encode/decode via a custom type codec.
###
async def main():
conn = await asyncpg.connect(user='postgres',
password='metaman',
database='postgres',
host='localhost')
await conn.set_type_codec('json',
encoder=json.dumps,
decoder=json.loads,
schema='pg_catalog')
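    # The same pattern should also work for the jsonb type (a sketch, not
    # exercised here):
    #     await conn.set_type_codec('jsonb', encoder=json.dumps,
    #                               decoder=json.loads, schema='pg_catalog')
    # after which plain dicts could be passed for jsonb columns without
    # calling json.dumps() manually.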
await conn.execute('CREATE TABLE users2(id serial PRIMARY KEY,'
' name text, dob date, test_json jsonb)')
await conn.execute('INSERT INTO users2(name, dob, test_json) VALUES($1, $2, $3)',
'Bob', datetime.date(1984, 3, 1), json.dumps({'test': 'works'}))
# shows that one CANNOT use ::json in the insert
# await conn.execute('INSERT INTO users2(name, test_json::json) VALUES($1, $2)',
# 'Bob', {'test': 'works'})
# shows that one CANNOT use ::json in the update
# await conn.execute('update users2 set name = $1, test_json::json = $2',
# 'Bob', json.dumps({'test': 'works'}))
row = await conn.fetchrow('SELECT id, dob, test_json::json'
' FROM users2 WHERE name = $1', 'Bob')
print(row['id'], row['dob'], row['test_json'])
print(row['test_json']['test'])
# Close the connection.
await conn.close()
asyncio.get_event_loop().run_until_complete(main())
| gpl-3.0 | 375,246,508,988,305,300 | 33.217391 | 87 | 0.52986 | false |
PaulEcoffet/megamachineacaoua | test_machine.py | 1 | 6484 | import unittest
from machine import Machine, InvalidOrderException, NotEnoughStockException
from drink import Drink
from coins import Coins, NoChangePossibleException
import copy
class MachineTestCase(unittest.TestCase):
"""
    Tests for MMC. To run them, use "python.exe -m unittest test_XXXXX.py"
    in cmd, or "python3 -m unittest test_XXXXX.py" in bash.
"""
def test_init_default_maxstock(self):
mc = Machine()
for key in Machine.StocksType:
self.assertEqual(mc.max_stocks[key], 100)
def test_init_default_maxcoins(self):
mc = Machine()
for key in Machine.CoinsContainers:
self.assertEqual(mc.max_coins[key], 100)
def test_init_empty_stocks(self):
mc = Machine()
for key in Machine.StocksType:
self.assertEqual(mc.stocks[key], 0)
def test_init_empty_coins(self):
mc = Machine()
for key in Machine.CoinsType:
self.assertEqual(mc.coins[key], 0)
def test_init_empty_log(self):
mc = Machine()
self.assertEqual(mc.log, [])
def test_edit_stocks(self):
mc = Machine()
stock = copy.deepcopy(mc.stocks)
mc.edit_stocks(coffee=50)
stock['coffee'] = 50
self.assertEqual(mc._stocks, stock)
mc.edit_stocks(coffee=49)
self.assertEqual(mc.stocks, stock)
mc.edit_stocks(coffee=mc.max_stocks['coffee']+1)
self.assertEqual(mc.stocks, stock)
mc.edit_stocks(coffee=mc.max_stocks['coffee'])
stock['coffee'] = mc.max_stocks['coffee']
self.assertEqual(mc.stocks, stock)
mc = Machine()
mc.refill_stocks()
self.assertEqual(mc.stocks, mc.max_stocks)
def test_edit_prices(self):
mc = Machine()
prices = copy.deepcopy(mc.stock_prices)
prices['coffee'] = 30
mc.edit_prices(coffee=30)
self.assertEqual(mc.stock_prices,prices)
mc.edit_prices(coffee=-10)
self.assertEqual(mc.stock_prices,prices)
prices['coffee'] = 40
prices['tea'] = 20
mc.edit_prices(coffee=40,tea=20)
self.assertEqual(mc.stock_prices,prices)
prices['sugar'] = [5,5,15,20]
mc.edit_prices(sugar=[5,5,15,20])
self.assertEqual(mc.stock_prices,prices)
mc.edit_prices(sugar=[10,5,15,20])
self.assertEqual(mc.stock_prices,prices)
mc.edit_prices(sugar=[-10,5,15,20])
self.assertEqual(mc.stock_prices,prices)
def test_order_simple(self):
mc = Machine()
mc.refill_stocks()
mc.refill_coins()
coins_stock = copy.copy(mc.coins)
drink, change = mc.order((0,1,0,0,0), (0,0,0,0,1,0))
self.assertEqual(drink.stocks, Drink((0,0,0,0,1,0),
mc.stock_prices).stocks)
self.assertEqual(change.value, 80)
self.assertEqual(mc.stocks, {'milk': 100, 'sugar':100, 'tea': 100,
'coffee': 99, 'chocolate': 100})
self.assertEqual(mc._cash.value, 100)
self.assertEqual(mc.coins.value, coins_stock.value - change.value)
def test_order_complex(self):
mc = Machine()
mc.refill_stocks()
mc.refill_coins()
coins_stock = copy.copy(mc.coins)
drink, change = mc.order((1,1,0,0,0), (1,1,1,0,1,1))
expected = Drink((1,1,1,0,1,1), mc.stock_prices)
self.assertEqual(drink.stocks, expected.stocks)
self.assertEqual(change.value, 300 - expected.price)
self.assertEqual(mc.stocks, {'milk': 99, 'sugar':97, 'tea': 100,
'coffee': 99, 'chocolate': 99})
self.assertEqual(mc._cash.value, 200)
# + 100 for next line because 100 is from user coins input
self.assertEqual(mc.coins.value, coins_stock.value - change.value + 100)
def test_order_fail_not_enough_cash(self):
mc = Machine()
mc.refill_stocks()
mc.refill_coins()
coins_stock = copy.copy(mc.coins)
self.assertRaises(InvalidOrderException, mc.order,(0,0,0,1,0),
(1,1,1,0,1,1))
self.assertEqual(mc.stocks, {'milk': 100, 'sugar':100, 'tea': 100,
'coffee': 100, 'chocolate': 100})
self.assertEqual(mc._cash.value, 0)
# + 100 for next line because 100 is from user coins input
self.assertEqual(mc.coins.value, coins_stock.value)
def test_order_fail_not_drink(self):
mc = Machine()
mc.refill_stocks()
mc.refill_coins()
coins_stock = copy.copy(mc.coins)
self.assertRaises(InvalidOrderException, mc.order,(0,0,0,1,0),
(1,0,1,0,0,0))
self.assertEqual(mc.stocks, {'milk': 100, 'sugar': 100, 'tea': 100,
'coffee': 100, 'chocolate': 100})
self.assertEqual(mc._cash.value, 0)
# + 100 for next line because 100 is from user coins input
self.assertEqual(mc.coins.value, coins_stock.value)
def test_order_fail_no_stock(self):
mc = Machine()
mc.refill_coins()
coins_stock = copy.copy(mc.coins)
self.assertRaises(NotEnoughStockException, mc.order,(0,0,0,1,0),
(1,0,1,1,0,0))
self.assertEqual(mc.stocks, {'milk': 0, 'sugar': 0, 'tea': 0,
'coffee': 0, 'chocolate': 0})
self.assertEqual(mc._cash.value, 0)
# + 100 for next line because 100 is from user coins input
self.assertEqual(mc.coins.value, coins_stock.value)
def test_order_cant_get_maxcash(self):
mc = Machine()
mc.refill_stocks()
mc.refill_coins()
coins_stock = copy.copy(mc.coins)
drink, change = mc.order((0,0,1,8,0), (1,1,1,0,1,1))
self.assertIsNone(drink)
self.assertEqual(change, Coins({200:0, 100: 0, 50:1, 20:8, 10: 0}))
self.assertEqual(mc.stocks, {'milk': 100, 'sugar': 100, 'tea': 100,
'coffee': 100, 'chocolate': 100})
self.assertEqual(mc._cash.value, 0)
# + 100 for next line because 100 is from user coins input
self.assertEqual(mc.coins.value, coins_stock.value)
def test_order_cant_give_money_back(self):
mc = Machine()
mc.refill_stocks()
self.assertRaises(NoChangePossibleException, mc.order, (0,1,0,0,0),
(0,0,0,0,1,0))
| apache-2.0 | -3,239,018,861,716,547,000 | 36.918129 | 80 | 0.572949 | false |
ujdhesa/unisubs | apps/videos/views.py | 1 | 40464 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import datetime
import string
import urllib, urllib2
from collections import namedtuple
import simplejson as json
from babelsubs.storage import diff as diff_subs
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import redirect_to_login
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from apps.videos.templatetags.paginator import paginate
from django.core.urlresolvers import reverse
from django.db.models import Sum
from django.http import (HttpResponse, Http404, HttpResponseRedirect,
HttpResponseForbidden)
from django.shortcuts import (render, render_to_response, get_object_or_404,
redirect)
from django.template import RequestContext
from django.utils.encoding import force_unicode
from django.utils.http import urlquote_plus
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.http import require_POST
from django.views.generic.list_detail import object_list
from gdata.service import RequestError
from vidscraper.errors import Error as VidscraperError
import widget
from widget import rpc as widget_rpc
from apps.auth.models import CustomUser as User
from apps.statistic.models import EmailShareStatistic
from apps.subtitles import models as sub_models
from apps.subtitles.forms import SubtitlesUploadForm
from apps.subtitles.pipeline import rollback_to
from apps.teams.models import Task
from apps.teams.permissions import (can_create_and_edit_subtitles,
can_create_and_edit_translations)
from apps.videos import permissions
from apps.videos.decorators import get_video_revision, get_video_from_code
from apps.videos.forms import (
VideoForm, FeedbackForm, EmailFriendForm, UserTestResultForm,
CreateVideoUrlForm, TranscriptionFileForm, AddFromFeedForm,
ChangeVideoOriginalLanguageForm
)
from apps.videos.models import (
Video, Action, SubtitleLanguage, VideoUrl, AlreadyEditingException
)
from apps.videos.rpc import VideosApiClass
from apps.videos.search_indexes import VideoIndex
from apps.videos.share_utils import _add_share_panel_context_for_video, _add_share_panel_context_for_history
from apps.videos.tasks import video_changed_tasks
from apps.widget.views import base_widget_params
from externalsites.models import can_sync_videourl
from utils import send_templated_email
from utils.basexconverter import base62
from utils.decorators import never_in_prod
from utils.metrics import Meter
from utils.rpc import RpcRouter
from utils.translation import get_user_languages_from_request
from teams.permissions import can_edit_video, can_add_version, can_rollback_language
rpc_router = RpcRouter('videos:rpc_router', {
'VideosApi': VideosApiClass()
})
# Formats shown to the user; does not include e.g. json, nor aliases.
AVAILABLE_SUBTITLE_FORMATS_FOR_DISPLAY = [
'dfxp', 'sbv', 'srt', 'ssa', 'txt', 'vtt',
]
LanguageListItem = namedtuple("LanguageListItem", "name status tags url")
class LanguageList(object):
"""List of languages for the video pages."""
def __init__(self, video):
original_languages = []
other_languages = []
for lang in video.all_subtitle_languages():
public_tip = lang.get_tip(public=False)
if public_tip is None or public_tip.subtitle_count == 0:
# no versions in this language yet
continue
language_name = lang.get_language_code_display()
status = self._calc_status(lang)
tags = self._calc_tags(lang)
url = lang.get_absolute_url()
item = LanguageListItem(language_name, status, tags, url)
if lang.language_code == video.primary_audio_language_code:
original_languages.append(item)
else:
other_languages.append(item)
original_languages.sort(key=lambda li: li.name)
other_languages.sort(key=lambda li: li.name)
self.items = original_languages + other_languages
def _calc_status(self, lang):
if lang.subtitles_complete:
if lang.has_public_version():
return 'complete'
else:
return 'needs-review'
else:
if lang.is_synced(public=False):
return 'incomplete'
else:
return 'needs-timing'
def _calc_tags(self, lang):
tags = []
if lang.is_primary_audio_language():
tags.append(ugettext(u'original'))
team_video = lang.video.get_team_video()
if not lang.subtitles_complete:
tags.append(ugettext(u'incomplete'))
elif team_video is not None:
            # subtitles are complete; check if they are under review/approval.
incomplete_tasks = (Task.objects.incomplete()
.filter(team_video=team_video,
language=lang.language_code))
for t in incomplete_tasks:
if t.type == Task.TYPE_IDS['Review']:
tags.append(ugettext(u'needs review'))
break
elif t.type == Task.TYPE_IDS['Approve']:
tags.append(ugettext(u'needs approval'))
break
else:
# subtitles are complete, but there's a subtitle/translate
# task for them. They must have gotten sent back.
tags.append(ugettext(u'needs editing'))
break
return tags
def __iter__(self):
return iter(self.items)
def __len__(self):
return len(self.items)
def index(request):
context = widget.add_onsite_js_files({})
context['all_videos'] = Video.objects.count()
context['popular_videos'] = VideoIndex.get_popular_videos("-today_views")[:VideoIndex.IN_ROW]
context['featured_videos'] = VideoIndex.get_featured_videos()[:VideoIndex.IN_ROW]
return render_to_response('index.html', context,
context_instance=RequestContext(request))
def watch_page(request):
# Assume we're currently indexing if the number of public
# indexed vids differs from the count of video objects by
# more than 1000
is_indexing = cache.get('is_indexing')
if is_indexing is None:
is_indexing = Video.objects.all().count() - VideoIndex.public().count() > 1000
cache.set('is_indexing', is_indexing, 300)
context = {
'featured_videos': VideoIndex.get_featured_videos()[:VideoIndex.IN_ROW],
'popular_videos': VideoIndex.get_popular_videos()[:VideoIndex.IN_ROW],
'latest_videos': VideoIndex.get_latest_videos()[:VideoIndex.IN_ROW*3],
'popular_display_views': 'week',
'is_indexing': is_indexing
}
return render_to_response('videos/watch.html', context,
context_instance=RequestContext(request))
def featured_videos(request):
return render_to_response('videos/featured_videos.html', {},
context_instance=RequestContext(request))
def latest_videos(request):
return render_to_response('videos/latest_videos.html', {},
context_instance=RequestContext(request))
def popular_videos(request):
return render_to_response('videos/popular_videos.html', {},
context_instance=RequestContext(request))
def volunteer_page(request):
# Get the user comfort languages list
user_langs = get_user_languages_from_request(request)
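    # Restrict the search index query to videos in (or subtitled into) the
    # user's languages, most-requested first.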
relevant = VideoIndex.public().filter(video_language_exact__in=user_langs) \
.filter_or(languages_exact__in=user_langs) \
.order_by('-requests_count')
featured_videos = relevant.filter(
featured__gt=datetime.datetime(datetime.MINYEAR, 1, 1)) \
.order_by('-featured')[:5]
popular_videos = relevant.order_by('-week_views')[:5]
latest_videos = relevant.order_by('-edited')[:15]
requested_videos = relevant.filter(requests_exact__in=user_langs)[:5]
context = {
'featured_videos': featured_videos,
'popular_videos': popular_videos,
'latest_videos': latest_videos,
'requested_videos': requested_videos,
'user_langs':user_langs,
}
return render_to_response('videos/volunteer.html', context,
context_instance=RequestContext(request))
def volunteer_category(request, category):
'''
Display results only for a particular category of video results from
popular, featured and latest videos.
'''
return render_to_response('videos/volunteer_%s.html' %(category),
context_instance=RequestContext(request))
def create(request):
video_form = VideoForm(request.user, request.POST or None)
context = {
'video_form': video_form,
'initial_url': request.GET.get('initial_url'),
'feed_form': AddFromFeedForm(request.user)
}
if video_form.is_valid():
try:
video = video_form.save()
except (VidscraperError, RequestError):
context['vidscraper_error'] = True
return render_to_response('videos/create.html', context,
context_instance=RequestContext(request))
messages.info(request, message=_(u'''Here is the subtitle workspace for your video. You can
share the video with friends, or get an embed code for your site. To add or
improve subtitles, click the button below the video.'''))
url_obj = video.videourl_set.filter(primary=True).all()[:1].get()
if url_obj.type != 'Y':
# Check for all types except for Youtube
if not url_obj.effective_url.startswith('https'):
messages.warning(request, message=_(u'''You have submitted a video
that is served over http. Your browser may display mixed
content warnings.'''))
if video_form.created:
messages.info(request, message=_(u'''Existing subtitles will be imported in a few minutes.'''))
return redirect(video.get_absolute_url())
return render_to_response('videos/create.html', context,
context_instance=RequestContext(request))
create.csrf_exempt = True
def create_from_feed(request):
form = AddFromFeedForm(request.user, request.POST or None)
if form.is_valid():
form.save()
messages.success(request, form.success_message())
return redirect('videos:create')
context = {
'video_form': VideoForm(),
'feed_form': form,
'from_feed': True
}
return render_to_response('videos/create.html', context,
context_instance=RequestContext(request))
create_from_feed.csrf_exempt = True
def shortlink(request, encoded_pk):
pk = base62.to_decimal(encoded_pk)
video = get_object_or_404(Video, pk=pk)
return redirect(video, video=video, permanent=True)
class VideoPageContext(dict):
"""Context dict for the video page."""
def __init__(self, request, video, video_url, tab, tab_only=False):
dict.__init__(self)
self['video'] = video
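        # Tab-only (AJAX) requests only need the tab-specific variables;
        # full page loads also prefetch languages and build the shared
        # context.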
if not tab_only:
video.prefetch_languages(with_public_tips=True,
with_private_tips=True)
self.setup(request, video, video_url)
self.setup_tab(request, video, video_url, tab)
def setup(self, request, video, video_url):
language_for_locale = video.subtitle_language(request.LANGUAGE_CODE)
if language_for_locale:
metadata = language_for_locale.get_metadata()
else:
metadata = video.get_metadata()
self.update(widget.add_onsite_js_files({}))
self['page_title'] = self.page_title(video)
self['metadata'] = metadata.convert_for_display()
self['language_list'] = LanguageList(video)
self['shows_widget_sharing'] = video.can_user_see(request.user)
self['widget_settings'] = json.dumps(
widget_rpc.get_general_settings(request))
_add_share_panel_context_for_video(self, video)
self['task'] = _get_related_task(request)
team_video = video.get_team_video()
if team_video is not None:
self['team'] = team_video.team
self['team_video'] = team_video
self['can_create_subs'] = can_create_and_edit_subtitles(
request.user, team_video)
self['can_create_trans'] = can_create_and_edit_translations(
request.user, team_video)
self['user_is_team_member'] = team_video.team.user_is_member(
request.user)
else:
self['team'] = self['team_video'] = None
self['can_create_trans'] = self['can_create_subs'] = True
self['user_is_team_member'] = False
@staticmethod
def page_title(video):
template = string.Template(ugettext("$title with subtitles | Amara"))
return template.substitute(title=video.title_display())
def setup_tab(self, request, video, video_url, tab):
setup_tab_method = getattr(self, 'setup_tab_%s' % tab, None)
if setup_tab_method is not None:
setup_tab_method(request, video, video_url, tab)
def setup_tab_video(self, request, video, video_url, tab):
self['widget_params'] = _widget_params(
request, video, language=None,
video_url=video_url and video_url.effective_url,
size=(620,370)
)
@get_video_from_code
def redirect_to_video(request, video):
return redirect(video, permanent=True)
@get_video_from_code
def video(request, video, video_url=None, title=None):
"""
    If the user is about to perform a task on this video, then t=[task.pk]
    is passed as a URL parameter.
"""
if video_url:
video_url = get_object_or_404(VideoUrl, pk=video_url)
# FIXME: what is this crazy mess?
if not video_url and ((video.title_for_url() and not video.title_for_url() == title) or (not video.title and title)):
return redirect(video, permanent=True)
video.update_view_counter()
tab = request.GET.get('tab')
if tab not in ('urls', 'comments', 'activity', 'video'):
# force tab to be video if it doesn't match either of the other
# tabs
tab = 'video'
if request.is_ajax():
context = VideoPageContext(request, video, video_url, tab,
tab_only=True)
template_name = 'videos/video-%s-tab.html' % tab
else:
template_name = 'videos/video-%s.html' % tab
context = VideoPageContext(request, video, video_url, tab)
context['tab'] = tab
return render(request, template_name, context)
def _get_related_task(request):
"""
    Check whether the request has t=[task-id] and, if so, whether the
    current user can perform that task. If all goes well, return the task
    to be performed.
"""
task_pk = request.GET.get('t', None)
if task_pk:
from teams.permissions import can_perform_task
try:
task = Task.objects.get(pk=task_pk)
if can_perform_task(request.user, task):
return task
except Task.DoesNotExist:
return
def actions_list(request, video_id):
video = get_object_or_404(Video, video_id=video_id)
qs = Action.objects.for_video(video)
extra_context = {
'video': video
}
return object_list(request, queryset=qs, allow_empty=True,
paginate_by=settings.ACTIVITIES_ONPAGE,
template_name='videos/actions_list.html',
template_object_name='action',
extra_context=extra_context)
@login_required
def upload_subtitles(request):
output = {'success': False}
video = Video.objects.get(id=request.POST['video'])
form = SubtitlesUploadForm(request.user, video, True, request.POST,
request.FILES, initial={'primary_audio_language_code':video.primary_audio_language_code})
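    # Wrap the JSON payload in a <textarea>, as expected by the client-side
    # handler of the file-upload form.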
response = lambda s: HttpResponse('<textarea>%s</textarea>' % json.dumps(s))
try:
if form.is_valid():
version = form.save()
output['success'] = True
output['next'] = version.subtitle_language.get_absolute_url()
output['msg'] = ugettext(
u'Thank you for uploading. '
u'It may take a minute or so for your subtitles to appear.')
else:
output['errors'] = form.get_errors()
except AlreadyEditingException, e:
output['errors'] = {'__all__': [force_unicode(e.msg)]}
except Exception, e:
import traceback
traceback.print_exc()
from raven.contrib.django.models import client
client.create_from_exception()
output['errors'] = {'__all__': [force_unicode(e)]}
return response(output)
@login_required
def upload_transcription_file(request):
output = {}
form = TranscriptionFileForm(request.POST, request.FILES)
if form.is_valid():
output['text'] = getattr(form, 'file_text', '')
else:
output['errors'] = form.get_errors()
return HttpResponse(u'<textarea>%s</textarea>' % json.dumps(output))
def feedback(request, hide_captcha=False):
output = dict(success=False)
form = FeedbackForm(request.POST, initial={'captcha': request.META['REMOTE_ADDR']},
hide_captcha=hide_captcha)
if form.is_valid():
form.send(request)
output['success'] = True
else:
output['errors'] = form.get_errors()
return HttpResponse(json.dumps(output), "text/javascript")
def email_friend(request):
text = request.GET.get('text', '')
link = request.GET.get('link', '')
if link:
text = link if not text else '%s\n%s' % (text, link)
from_email = ''
if request.user.is_authenticated():
from_email = request.user.email
initial = dict(message=text, from_email=from_email)
if request.method == 'POST':
form = EmailFriendForm(request.POST, auto_id="email_friend_id_%s", label_suffix="")
if form.is_valid():
email_st = EmailShareStatistic()
if request.user.is_authenticated():
email_st.user = request.user
email_st.save()
form.send()
messages.info(request, 'Email Sent!')
return redirect(request.get_full_path())
else:
form = EmailFriendForm(auto_id="email_friend_id_%s", initial=initial, label_suffix="")
context = {
'form': form
}
return render_to_response('videos/email_friend.html', context,
context_instance=RequestContext(request))
@get_video_from_code
def legacy_history(request, video, lang=None):
"""
In the old days we allowed only one translation per video.
Therefore video urls looked like /vfjdh2/en/
    Now that this constraint is removed, we need to redirect old urls
    to the new view, which also needs the language's primary key.
"""
try:
language = video.subtitle_language(lang)
if language is None:
raise SubtitleLanguage.DoesNotExist("No such language")
except sub_models.SubtitleLanguage.DoesNotExist:
raise Http404()
return HttpResponseRedirect(reverse("videos:translation_history", kwargs={
'video_id': video.video_id,
'lang_id': language.pk,
'lang': language.language_code,
}))
class LanguagePageContext(dict):
"""Context dict for language pages
    This class defines the base class that sets up the variables we use for
    all the language pages. For the specific language pages (subtitles,
comments, revisions), we use a subclass of this.
"""
def __init__(self, request, video, lang_code, lang_id, version_id,
tab_only=False):
dict.__init__(self)
self.public_only = self.calc_public_only(request, video)
language = self._get_language(video, lang_code, lang_id)
version = self._get_version(request, video, language, version_id)
self['video'] = video
self['language'] = language
self['version'] = version
self['user'] = request.user
if not tab_only:
video.prefetch_languages(with_public_tips=True,
with_private_tips=True)
self.setup(request, video, language, version)
self.setup_tab(request, video, language, version)
def _get_language(self, video, lang_code, lang_id):
"""Get a language for the language page views.
For historical reasons, we normally specify both a language code and a
language id. This method takes both of those and returns a
SubtitleLanguage.
"""
try:
language = video.language_with_pk(lang_id)
except SubtitleLanguage.DoesNotExist:
raise Http404
if language.language_code != lang_code:
raise Http404
return language
def calc_public_only(self, request, video):
team_video = video.get_team_video()
return (team_video and not team_video.team.is_member(request.user))
def _get_version(self, request, video, language, version_id):
"""Get the SubtitleVersion to use for a language page."""
team_video = video.get_team_video()
if version_id:
try:
return language.get_version_by_id(version_id,
public=self.public_only)
except sub_models.SubtitleVersion.DoesNotExist:
raise Http404
else:
return language.get_tip(public=self.public_only)
def setup(self, request, video, language, version):
"""Setup context variables."""
self.update(widget.add_onsite_js_files({}))
self['revision_count'] = language.version_count()
self['language_list'] = LanguageList(video)
self['page_title'] = self.page_title(language)
self['edit_url'] = language.get_widget_url()
self['shows_widget_sharing'] = video.can_user_see(request.user)
self['widget_params'] = _widget_params(request, video, version_no=None,
language=language,
size=(289, 173))
_add_share_panel_context_for_history(self, video, language)
if version is not None:
self['metadata'] = version.get_metadata().convert_for_display()
else:
self['metadata'] = video.get_metadata().convert_for_display()
self['rollback_allowed'] = self.calc_rollback_allowed(
request, version, language)
def calc_rollback_allowed(self, request, version, language):
if version and version.next_version():
return (version.video.get_team_video() is None or
can_rollback_language(request.user, language))
else:
return False
def setup_tab(self, request, video, language, video_url):
"""Setup tab-specific variables."""
pass
@staticmethod
def page_title(language):
template = string.Template(ugettext("$title with subtitles | Amara"))
return template.substitute(title=language.title_display())
class LanguagePageContextSubtitles(LanguagePageContext):
def setup_tab(self, request, video, language, version):
team_video = video.get_team_video()
user_can_add_version = can_add_version(request.user, video,
language.language_code)
self['downloadable_formats'] = AVAILABLE_SUBTITLE_FORMATS_FOR_DISPLAY
self['edit_disabled'] = not user_can_add_version
# If there are tasks for this language, the user has to go through the
# tasks panel to edit things instead of doing it directly from here.
if user_can_add_version and video.get_team_video():
has_open_task = (Task.objects.incomplete()
.filter(team_video=video.get_team_video(),
language=language.language_code)
.exists())
if has_open_task:
self['edit_disabled'] = True
self['must_use_tasks'] = True
if 'rollback_allowed' not in self:
self['rollback_allowed'] = self.calc_rollback_allowed(
request, version, language)
class LanguagePageContextComments(LanguagePageContext):
pass
class LanguagePageContextRevisions(LanguagePageContext):
REVISIONS_PER_PAGE = 10
def setup_tab(self, request, video, language, version):
if self.public_only:
revisions_qs = language.subtitleversion_set.public()
else:
revisions_qs = language.subtitleversion_set.extant()
revisions_qs = revisions_qs.order_by('-version_number')
revisions, pagination_info = paginate(
revisions_qs, self.REVISIONS_PER_PAGE, request.GET.get('page'))
self.update(pagination_info)
self['revisions'] = language.optimize_versions(revisions)
class LanguagePageContextSyncHistory(LanguagePageContext):
def setup_tab(self, request, video, language, version):
self['sync_history'] = language.synchistory_set.order_by('-id').all()
self['current_version'] = language.get_public_tip()
synced_versions = []
for video_url in video.get_video_urls():
if not can_sync_videourl(video_url):
continue
try:
version = (language.syncedsubtitleversion_set.
select_related('version').
get(video_url=video_url)).version
except ObjectDoesNotExist:
version = None
synced_versions.append({
'video_url': video_url,
'version': version,
})
self['synced_versions'] = synced_versions
@get_video_from_code
def language_subtitles(request, video, lang, lang_id, version_id=None):
tab = request.GET.get('tab')
if tab == 'revisions':
ContextClass = LanguagePageContextRevisions
elif tab == 'comments':
ContextClass = LanguagePageContextComments
elif tab == 'sync-history':
if not request.user.is_staff:
return redirect_to_login(request.build_absolute_uri())
ContextClass = LanguagePageContextSyncHistory
else:
# force tab to be subtitles if it doesn't match either of the other
# tabs
tab = 'subtitles'
ContextClass = LanguagePageContextSubtitles
if request.is_ajax():
context = ContextClass(request, video, lang, lang_id, version_id,
tab_only=True)
template_name = 'videos/language-%s-tab.html' % tab
else:
template_name = 'videos/language-%s.html' % tab
context = ContextClass(request, video, lang, lang_id, version_id)
context['tab'] = tab
if 'tab' not in request.GET:
# we only want to update the view counter if this request wasn't
# the result of a tab click.
video.update_view_counter()
return render(request, template_name, context)
def _widget_params(request, video, version_no=None, language=None, video_url=None, size=None):
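    """Build the parameter dict passed to the embedded subtitle widget."""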
primary_url = video_url or video.get_video_url()
alternate_urls = [vu.effective_url for vu in video.videourl_set.all()
if vu.effective_url != primary_url]
params = {'video_url': primary_url,
'alternate_video_urls': alternate_urls,
'base_state': {}}
if version_no:
params['base_state']['revision'] = version_no
if language:
params['base_state']['language_code'] = language.language_code
params['base_state']['language_pk'] = language.pk
if size:
params['video_config'] = {"width":size[0], "height":size[1]}
return base_widget_params(request, params)
@login_required
@get_video_revision
def rollback(request, version):
is_writelocked = version.subtitle_language.is_writelocked
team_video = version.video.get_team_video()
if team_video and not can_rollback_language(request.user,
version.subtitle_language):
messages.error(request, _(u"You don't have permission to rollback "
"this language"))
elif is_writelocked:
        messages.error(request, u'Cannot roll back now, because someone is editing the subtitles.')
elif not version.next_version():
        messages.error(request, message=u'Cannot roll back to the latest version')
else:
messages.success(request, message=u'Rollback successful')
version = rollback_to(version.video,
version.subtitle_language.language_code,
version_number=version.version_number,
rollback_author=request.user)
video_changed_tasks.delay(version.video.id, version.id)
return redirect(version.subtitle_language.get_absolute_url()+'#revisions')
return redirect(version)
@get_video_revision
def diffing(request, first_version, second_pk):
language = first_version.subtitle_language
second_version = get_object_or_404(
sub_models.SubtitleVersion.objects.extant(),
pk=second_pk, subtitle_language=language)
if first_version.video != second_version.video:
# this is either a bad bug, or someone evil
raise "Revisions for diff videos"
if first_version.pk < second_version.pk:
        # This is confusing: instead of first, second meaning
        # chronological order (first comes before second),
        # it means the opposite, so make sure the first version
        # has a larger version number than the second.
first_version, second_version = second_version, first_version
video = first_version.subtitle_language.video
diff_data = diff_subs(first_version.get_subtitles(), second_version.get_subtitles())
team_video = video.get_team_video()
context = widget.add_onsite_js_files({})
context['video'] = video
context['diff_data'] = diff_data
context['language'] = language
context['first_version'] = first_version
context['second_version'] = second_version
context['latest_version'] = language.get_tip()
if team_video and not can_rollback_language(request.user, language):
context['rollback_allowed'] = False
else:
context['rollback_allowed'] = True
context['widget0_params'] = \
_widget_params(request, video,
first_version.version_number)
context['widget1_params'] = \
_widget_params(request, video,
second_version.version_number)
return render_to_response('videos/diffing.html', context,
context_instance=RequestContext(request))
def test_form_page(request):
if request.method == 'POST':
form = UserTestResultForm(request.POST)
if form.is_valid():
form.save(request)
messages.success(request, 'Thanks for your feedback. It\'s a huge help to us as we improve the site.')
return redirect('videos:test_form_page')
else:
form = UserTestResultForm()
context = {
'form': form
}
return render_to_response('videos/test_form_page.html', context,
context_instance=RequestContext(request))
@login_required
def stop_notification(request, video_id):
user_id = request.GET.get('u')
hash = request.GET.get('h')
if not user_id or not hash:
raise Http404
video = get_object_or_404(Video, video_id=video_id)
user = get_object_or_404(User, id=user_id)
context = dict(video=video, u=user)
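    # Only unsubscribe the user from the video and all of its languages if
    # the secret hash matches.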
if hash and user.hash_for_video(video_id) == hash:
video.followers.remove(user)
for l in video.subtitlelanguage_set.all():
l.followers.remove(user)
if request.user.is_authenticated() and not request.user == user:
logout(request)
else:
context['error'] = u'Incorrect secret hash'
return render_to_response('videos/stop_notification.html', context,
context_instance=RequestContext(request))
@login_required
@require_POST
def video_url_make_primary(request):
output = {}
id = request.POST.get('id')
status = 200
if id:
try:
obj = VideoUrl.objects.get(id=id)
tv = obj.video.get_team_video()
if tv and not permissions.can_user_edit_video_urls(obj.video, request.user):
                output['error'] = ugettext('You do not have permission to change this URL')
status = 403
else:
obj.make_primary(user=request.user)
except VideoUrl.DoesNotExist:
output['error'] = ugettext('Object does not exist')
status = 404
return HttpResponse(json.dumps(output), status=status)
@login_required
@require_POST
def video_url_remove(request):
output = {}
id = request.POST.get('id')
status = 200
if id:
try:
obj = VideoUrl.objects.get(id=id)
tv = obj.video.get_team_video()
if tv and not permissions.can_user_edit_video_urls(obj.video, request.user):
                output['error'] = ugettext('You do not have permission to delete this URL')
status = 403
else:
if obj.primary:
                    output['error'] = ugettext('You can\'t remove the primary URL')
status = 403
else:
# create activity record
act = Action(video=obj.video, action_type=Action.DELETE_URL)
act.new_video_title = obj.url
act.created = datetime.datetime.now()
act.user = request.user
act.save()
# delete
obj.delete()
except VideoUrl.DoesNotExist:
output['error'] = ugettext('Object does not exist')
status = 404
return HttpResponse(json.dumps(output), status=status)
@login_required
def video_url_create(request):
output = {}
form = CreateVideoUrlForm(request.user, request.POST)
if form.is_valid():
obj = form.save()
video = form.cleaned_data['video']
users = video.notification_list(request.user)
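        # Notify everyone following the video about the newly added URL.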
for user in users:
subject = u'New video URL added by %(username)s to "%(video_title)s" on universalsubtitles.org'
subject = subject % {'url': obj.url, 'username': obj.added_by, 'video_title': video}
context = {
'video': video,
'video_url': obj,
'user': user,
'domain': Site.objects.get_current().domain,
'hash': user.hash_for_video(video.video_id)
}
Meter('templated-emails-sent-by-type.videos.video-url-added').inc()
send_templated_email(user, subject,
'videos/email_video_url_add.html',
context, fail_silently=not settings.DEBUG)
else:
output['errors'] = form.get_errors()
return HttpResponse(json.dumps(output))
@staff_member_required
def reindex_video(request, video_id):
from teams.tasks import update_one_team_video
video = get_object_or_404(Video, video_id=video_id)
video.update_search_index()
team_video = video.get_team_video()
if team_video:
update_one_team_video.delay(team_video.id)
def subscribe_to_updates(request):
email_address = request.POST.get('email_address', '')
data = urllib.urlencode({'email': email_address})
req = urllib2.Request(
'http://pcf8.pculture.org/interspire/form.php?form=3', data)
urllib2.urlopen(req)
return HttpResponse('ok', 'text/plain')
def test_celery(request):
from videos.tasks import add
add.delay(1, 2)
return HttpResponse('Hello, from Amazon SQS backend for Celery!')
@staff_member_required
def test_celery_exception(request):
from videos.tasks import raise_exception
raise_exception.delay('Exception in Celery', should_be_logged='Hello, man!')
return HttpResponse('Hello, from Amazon SQS backend for Celery! Look for exception.')
@never_in_prod
@staff_member_required
def video_staff_delete(request, video_id):
video = get_object_or_404(Video, video_id=video_id)
video.delete()
return HttpResponse("ok")
def video_debug(request, video_id):
from apps.widget import video_cache as vc
from django.core.cache import cache
from accountlinker.models import youtube_sync
from videos.models import VIDEO_TYPE_YOUTUBE
video = get_object_or_404(Video, video_id=video_id)
vid = video.video_id
get_subtitles_dict = {}
for l in video.newsubtitlelanguage_set.all():
cache_key = vc._subtitles_dict_key(vid, l.pk, None)
get_subtitles_dict[l.language_code] = cache.get(cache_key)
cache = {
"get_video_urls": cache.get(vc._video_urls_key(vid)),
"get_subtitles_dict": get_subtitles_dict,
"get_video_languages": cache.get(vc._video_languages_key(vid)),
"get_video_languages_verbose": cache.get(vc._video_languages_verbose_key(vid)),
"writelocked_langs": cache.get(vc._video_writelocked_langs_key(vid)),
}
tasks = Task.objects.filter(team_video=video)
is_youtube = video.videourl_set.filter(type=VIDEO_TYPE_YOUTUBE).count() != 0
if request.method == 'POST' and request.POST.get('action') == 'sync':
# Sync video to youtube
sync_lang = sub_models.SubtitleLanguage.objects.get(
pk=request.POST.get('language'))
youtube_sync(video, sync_lang)
return render_to_response("videos/video_debug.html", {
'video': video,
'is_youtube': is_youtube,
'tasks': tasks,
"cache": cache
}, context_instance=RequestContext(request))
def reset_metadata(request, video_id):
video = get_object_or_404(Video, video_id=video_id)
video_changed_tasks.delay(video.id)
return HttpResponse('ok')
def set_original_language(request, video_id):
"""
    We only allow this if the video is owned by a team, or the video owner
    is the logged-in user.
"""
video = get_object_or_404(Video, video_id=video_id)
if not (can_edit_video(video.get_team_video(), request.user) or video.user == request.user):
return HttpResponseForbidden("Can't touch this.")
form = ChangeVideoOriginalLanguageForm(request.POST or None, initial={
'language_code': video.primary_audio_language_code
})
if request.method == "POST" and form.is_valid():
video.primary_audio_language_code = form.cleaned_data['language_code']
video.save()
messages.success(request, _(u'The language for %s has been changed' % (video)))
return HttpResponseRedirect(reverse("videos:set_original_language", args=(video_id,)))
return render_to_response("videos/set-original-language.html", {
"video": video,
'form': form
}, context_instance=RequestContext(request)
)
| agpl-3.0 | 1,893,509,965,617,708,800 | 38.670588 | 121 | 0.627051 | false |
ToontownUprising/src | toontown/nametag/Nametag2d.py | 1 | 10929 | from direct.task.Task import Task
import math
from panda3d.core import PGButton, VBase4, DepthWriteAttrib, Point3
from toontown.chat.ChatBalloon import ChatBalloon
from toontown.margins import MarginGlobals
from toontown.margins.MarginVisible import MarginVisible
from toontown.nametag import NametagGlobals
from toontown.nametag.Nametag import Nametag
from toontown.toontowngui.Clickable2d import Clickable2d
class Nametag2d(Nametag, Clickable2d, MarginVisible):
CONTENTS_SCALE = 0.25
CHAT_TEXT_MAX_ROWS = 6
CHAT_TEXT_WORD_WRAP = 8
CHAT_BALLOON_ALPHA = 0.4
ARROW_OFFSET = -1.0
ARROW_SCALE = 1.5
def __init__(self):
Nametag.__init__(self)
Clickable2d.__init__(self, 'Nametag2d')
MarginVisible.__init__(self)
self.actualChatText = ''
self.arrow = None
self.textNodePath = None
self.contents.setScale(self.CONTENTS_SCALE)
self.hideThought()
self.accept('MarginVisible-update', self.update)
def destroy(self):
self.ignoreAll()
Nametag.destroy(self)
if self.textNodePath is not None:
self.textNodePath.removeNode()
self.textNodePath = None
if self.arrow is not None:
self.arrow.removeNode()
self.arrow = None
Clickable2d.destroy(self)
def getUniqueName(self):
return 'Nametag2d-' + str(id(self))
def getChatBalloonModel(self):
return NametagGlobals.chatBalloon2dModel
def getChatBalloonWidth(self):
return NametagGlobals.chatBalloon2dWidth
def getChatBalloonHeight(self):
return NametagGlobals.chatBalloon2dHeight
def setChatText(self, chatText):
self.actualChatText = chatText
Nametag.setChatText(self, chatText)
def updateClickRegion(self):
if self.chatBalloon is not None:
right = self.chatBalloon.width / 2.0
left = -right
top = self.chatBalloon.height / 2.0
bottom = -top
self.setClickRegionFrame(left, right, bottom, top)
self.region.setActive(True)
elif self.panel is not None:
centerX = (self.textNode.getLeft()+self.textNode.getRight()) / 2.0
centerY = (self.textNode.getBottom()+self.textNode.getTop()) / 2.0
left = centerX - (self.panelWidth/2.0)
right = centerX + (self.panelWidth/2.0)
bottom = centerY - (self.panelHeight/2.0)
top = centerY + (self.panelHeight/2.0)
self.setClickRegionFrame(left, right, bottom, top)
self.region.setActive(True)
else:
if self.region is not None:
self.region.setActive(False)
def isClickable(self):
if self.getChatText() and self.hasChatButton():
return True
return NametagGlobals.wantActiveNametags and Clickable2d.isClickable(self)
def setClickState(self, clickState):
if self.isClickable():
self.applyClickState(clickState)
else:
self.applyClickState(PGButton.SInactive)
Clickable2d.setClickState(self, clickState)
def enterDepressed(self):
if self.isClickable():
base.playSfx(NametagGlobals.clickSound)
def enterRollover(self):
if self.isClickable() and (self.lastClickState != PGButton.SDepressed):
base.playSfx(NametagGlobals.rolloverSound)
def update(self):
self.contents.node().removeAllChildren()
Nametag.update(self)
if self.cell is not None:
# We're in the margin display. Reposition our content, and update
# the click region:
self.reposition()
self.updateClickRegion()
else:
# We aren't in the margin display. Disable the click region if one
# is present:
if self.region is not None:
self.region.setActive(False)
def tick(self, task):
if (self.avatar is None) or self.avatar.isEmpty():
return Task.cont
if (self.cell is None) or (self.arrow is None):
return Task.cont
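        # Project the avatar's position into camera space and rotate the
        # arrow to point toward the avatar.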
location = self.avatar.getPos(NametagGlobals.me)
rotation = NametagGlobals.me.getQuat(base.cam)
camSpacePos = rotation.xform(location)
arrowRadians = math.atan2(camSpacePos[0], camSpacePos[1])
arrowDegrees = (arrowRadians/math.pi) * 180
self.arrow.setR(arrowDegrees - 90)
return Task.cont
def drawChatBalloon(self, model, modelWidth, modelHeight):
if self.chatFont is None:
# We can't draw this without a font.
return
# Prefix the nametag text:
self.chatTextNode.setText(self.getText() + ': ' + self.actualChatText)
# Set our priority in the margin system:
self.setPriority(MarginGlobals.MP_normal)
if self.textNodePath is not None:
self.textNodePath.removeNode()
self.textNodePath = None
if self.arrow is not None:
self.arrow.removeNode()
self.arrow = None
if self.isClickable():
foreground, background = self.chatColor[self.clickState]
else:
foreground, background = self.chatColor[PGButton.SInactive]
if self.chatType == NametagGlobals.SPEEDCHAT:
background = self.speedChatColor
if background[3] > self.CHAT_BALLOON_ALPHA:
background = VBase4(
background[0], background[1], background[2],
self.CHAT_BALLOON_ALPHA)
self.chatBalloon = ChatBalloon(
model, modelWidth, modelHeight, self.chatTextNode,
foreground=foreground, background=background,
reversed=self.chatReversed,
button=self.chatButton[self.clickState])
self.chatBalloon.reparentTo(self.contents)
# Calculate the center of the TextNode:
left, right, bottom, top = self.chatTextNode.getFrameActual()
center = self.contents.getRelativePoint(
self.chatBalloon.textNodePath,
((left+right) / 2.0, 0, (bottom+top) / 2.0))
# Translate the chat balloon along the inverse:
self.chatBalloon.setPos(self.chatBalloon, -center)
def drawNametag(self):
# Set our priority in the margin system:
self.setPriority(MarginGlobals.MP_low)
if self.textNodePath is not None:
self.textNodePath.removeNode()
self.textNodePath = None
if self.arrow is not None:
self.arrow.removeNode()
self.arrow = None
if self.font is None:
# We can't draw this without a font.
return
# Attach the icon:
if self.icon is not None:
self.contents.attachNewNode(self.icon)
if self.isClickable():
foreground, background = self.nametagColor[self.clickState]
else:
foreground, background = self.nametagColor[PGButton.SInactive]
# Set the color of the TextNode:
self.textNode.setTextColor(foreground)
# Attach the TextNode:
self.textNodePath = self.contents.attachNewNode(self.textNode, 1)
self.textNodePath.setTransparency(foreground[3] < 1)
self.textNodePath.setAttrib(DepthWriteAttrib.make(0))
self.textNodePath.setY(self.TEXT_Y_OFFSET)
# Attach a panel behind the TextNode:
self.panel = NametagGlobals.cardModel.copyTo(self.contents, 0)
self.panel.setColor(background)
self.panel.setTransparency(background[3] < 1)
# Reposition the panel:
x = (self.textNode.getLeft()+self.textNode.getRight()) / 2.0
z = (self.textNode.getBottom()+self.textNode.getTop()) / 2.0
self.panel.setPos(x, 0, z)
# Resize the panel:
self.panelWidth = self.textNode.getWidth() + self.PANEL_X_PADDING
self.panelHeight = self.textNode.getHeight() + self.PANEL_Z_PADDING
self.panel.setScale(self.panelWidth, 1, self.panelHeight)
# Add an arrow:
self.arrow = NametagGlobals.arrowModel.copyTo(self.contents)
self.arrow.setZ(self.ARROW_OFFSET + self.textNode.getBottom())
self.arrow.setScale(self.ARROW_SCALE)
self.arrow.setColor(self.nametagColor[0][0])
def marginVisibilityChanged(self):
if self.cell is not None:
# We're in the margin display. Reposition our content, and update
# the click region:
self.reposition()
self.updateClickRegion()
else:
# We aren't in the margin display. Disable the click region if one
# is present:
if self.region is not None:
self.region.setActive(False)
def reposition(self):
if self.contents is None:
return
origin = Point3()
self.contents.setPos(origin)
if self.chatBalloon is not None:
self.chatBalloon.removeNode()
self.chatBalloon = None
self.contents.node().removeAllChildren()
if (self.cell in base.leftCells) or (self.cell in base.rightCells):
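            # Widen the word wrap if needed so the chat text never exceeds
            # CHAT_TEXT_MAX_ROWS rows.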
text = self.getChatText().replace('\x01WLDisplay\x01', '').replace('\x02', '')
textWidth = self.chatTextNode.calcWidth(text)
if (textWidth / self.CHAT_TEXT_WORD_WRAP) > self.CHAT_TEXT_MAX_ROWS:
self.chatTextNode.setWordwrap(textWidth / (self.CHAT_TEXT_MAX_ROWS-0.5))
else:
self.chatTextNode.setWordwrap(self.CHAT_TEXT_WORD_WRAP)
model = self.getChatBalloonModel()
modelWidth = self.getChatBalloonWidth()
modelHeight = self.getChatBalloonHeight()
self.drawChatBalloon(model, modelWidth, modelHeight)
nodePath = self.chatBalloon.textNodePath
left, right, bottom, top = self.chatTextNode.getFrameActual()
elif self.panel is not None:
nodePath = self.textNodePath
left, right, bottom, top = self.textNode.getFrameActual()
# Compensate for the arrow:
bottom -= self.ARROW_SCALE
else:
return
if self.cell in base.bottomCells:
# Move the origin to the bottom center of the node path:
origin = self.contents.getRelativePoint(
nodePath, ((left+right) / 2.0, 0, bottom))
elif self.cell in base.leftCells:
# Move the origin to the left center of the node path:
origin = self.contents.getRelativePoint(
nodePath, (left, 0, (bottom+top) / 2.0))
elif self.cell in base.rightCells:
# Move the origin to the right center of the node path:
origin = self.contents.getRelativePoint(
nodePath, (right, 0, (bottom+top) / 2.0))
self.contents.setPos(self.contents, -origin)
| mit | 4,598,167,220,696,883,700 | 33.805732 | 94 | 0.620093 | false |
lucastheis/cmt | code/cmt/python/tests/stm_test.py | 1 | 8330 | import sys
import unittest
from numpy import *
from numpy import max, min
from numpy.random import *
from pickle import dump, load
from tempfile import mkstemp
from cmt.models import STM, GLM, Bernoulli, Poisson
from cmt.nonlinear import LogisticFunction, ExponentialFunction
from scipy.stats import norm
class Tests(unittest.TestCase):
def test_basics(self):
dim_in_nonlinear = 10
dim_in_linear = 8
num_components = 7
num_features = 50
num_samples = 100
# create model
stm = STM(dim_in_nonlinear, dim_in_linear, num_components, num_features)
# generate output
input_nonlinear = randint(2, size=[dim_in_nonlinear, num_samples])
input_linear = randint(2, size=[dim_in_linear, num_samples])
input = vstack([input_nonlinear, input_linear])
output = stm.sample(input)
loglik = stm.loglikelihood(input, output)
# check hyperparameters
self.assertEqual(stm.dim_in, dim_in_linear + dim_in_nonlinear)
self.assertEqual(stm.dim_in_linear, dim_in_linear)
self.assertEqual(stm.dim_in_nonlinear, dim_in_nonlinear)
self.assertEqual(stm.num_components, num_components)
self.assertEqual(stm.num_features, num_features)
# check parameters
self.assertEqual(stm.biases.shape[0], num_components)
self.assertEqual(stm.biases.shape[1], 1)
self.assertEqual(stm.weights.shape[0], num_components)
self.assertEqual(stm.weights.shape[1], num_features)
self.assertEqual(stm.features.shape[0], dim_in_nonlinear)
self.assertEqual(stm.features.shape[1], num_features)
self.assertEqual(stm.predictors.shape[0], num_components)
self.assertEqual(stm.predictors.shape[1], dim_in_nonlinear)
self.assertEqual(stm.linear_predictor.shape[0], dim_in_linear)
self.assertEqual(stm.linear_predictor.shape[1], 1)
# check dimensionality of output
self.assertEqual(output.shape[0], 1)
self.assertEqual(output.shape[1], num_samples)
self.assertEqual(loglik.shape[0], 1)
self.assertEqual(loglik.shape[1], num_samples)
def test_sample(self):
q = 0.92
N = 10000
stm = STM(0, 0, 1, 1)
stm.biases = [log(q / (1. - q))]
x = mean(stm.sample(empty([0, N]))) - q
p = 2. - 2. * norm.cdf(abs(x), scale=sqrt(q * (1. - q) / N))
		# should fail in about 1 in 10,000 tests (p threshold is 0.0001), but not more
self.assertGreater(p, 0.0001)
def test_train(self):
stm = STM(8, 4, 4, 10)
parameters = stm._parameters()
stm.train(
randint(2, size=[stm.dim_in, 2000]),
randint(2, size=[stm.dim_out, 2000]),
parameters={
'verbosity': 0,
'max_iter': 0,
})
# parameters should not have changed
self.assertLess(max(abs(stm._parameters() - parameters)), 1e-20)
def callback(i, stm):
callback.counter += 1
return
callback.counter = 0
max_iter = 10
stm.train(
randint(2, size=[stm.dim_in, 10000]),
randint(2, size=[stm.dim_out, 10000]),
parameters={
'verbosity': 0,
'max_iter': max_iter,
'threshold': 0.,
'batch_size': 1999,
'callback': callback,
'cb_iter': 2,
})
self.assertEqual(callback.counter, max_iter / 2)
# test zero-dimensional nonlinear inputs
stm = STM(0, 5, 5)
glm = GLM(stm.dim_in_linear, LogisticFunction, Bernoulli)
glm.weights = randn(*glm.weights.shape)
input = randn(stm.dim_in_linear, 10000)
output = glm.sample(input)
stm.train(input, output, parameters={'max_iter': 20})
# STM should be able to learn GLM behavior
self.assertAlmostEqual(glm.evaluate(input, output), stm.evaluate(input, output), 1)
# test zero-dimensional inputs
stm = STM(0, 0, 10)
input = empty([0, 10000])
output = rand(1, 10000) < 0.35
stm.train(input, output)
self.assertLess(abs(mean(stm.sample(input)) - mean(output)), 0.1)
def test_gradient(self):
stm = STM(5, 2, 10)
stm.sharpness = 1.5
# choose random parameters
stm._set_parameters(randn(*stm._parameters().shape) / 100.)
err = stm._check_gradient(
randn(stm.dim_in, 1000),
randint(2, size=[stm.dim_out, 1000]),
1e-5,
parameters={'train_sharpness': True})
self.assertLess(err, 1e-8)
# test with regularization turned off
for param in ['biases', 'weights', 'features', 'pred', 'linear_predictor', 'sharpness']:
err = stm._check_gradient(
randn(stm.dim_in, 1000),
randint(2, size=[stm.dim_out, 1000]),
1e-6,
parameters={
'train_biases': param == 'biases',
'train_weights': param == 'weights',
'train_features': param == 'features',
'train_predictors': param == 'pred',
'train_linear_predictor': param == 'linear_predictor',
'train_sharpness': param == 'sharpness',
})
self.assertLess(err, 1e-7)
# test with regularization turned on
for norm in [b'L1', b'L2']:
for param in ['priors', 'weights', 'features', 'pred', 'input_bias', 'output_bias']:
err = stm._check_gradient(
randint(2, size=[stm.dim_in, 1000]),
randint(2, size=[stm.dim_out, 1000]),
1e-7,
parameters={
'train_prior': param == 'priors',
'train_weights': param == 'weights',
'train_features': param == 'features',
'train_predictors': param == 'pred',
'train_input_bias': param == 'input_bias',
'train_output_bias': param == 'output_bias',
'regularize_biases': {'strength': 0.6, 'norm': norm},
'regularize_features': {'strength': 0.6, 'norm': norm},
'regularize_predictors': {'strength': 0.6, 'norm': norm},
'regularize_weights': {'strength': 0.6, 'norm': norm},
})
self.assertLess(err, 1e-6)
self.assertFalse(any(isnan(
stm._parameter_gradient(
randint(2, size=[stm.dim_in, 1000]),
randint(2, size=[stm.dim_out, 1000]),
stm._parameters()))))
def test_glm_data_gradient(self):
models = []
models.append(
STM(
dim_in_nonlinear=5,
dim_in_linear=0,
num_components=3,
num_features=2,
nonlinearity=LogisticFunction,
distribution=Bernoulli))
models.append(
STM(
dim_in_nonlinear=5,
dim_in_linear=0,
num_components=3,
num_features=2,
nonlinearity=ExponentialFunction,
distribution=Poisson))
models.append(
STM(
dim_in_nonlinear=2,
dim_in_linear=3,
num_components=3,
num_features=2,
nonlinearity=LogisticFunction,
distribution=Bernoulli))
models.append(
STM(
dim_in_nonlinear=2,
dim_in_linear=3,
num_components=4,
num_features=0,
nonlinearity=LogisticFunction,
distribution=Bernoulli))
models.append(
STM(
dim_in_nonlinear=0,
dim_in_linear=3,
num_components=2,
num_features=0,
nonlinearity=LogisticFunction,
distribution=Bernoulli))
for stm in models:
stm.sharpness = .5 + rand()
x = randn(stm.dim_in, 100)
y = stm.sample(x)
dx, _, ll = stm._data_gradient(x, y)
h = 1e-7
# compute numerical gradient
dx_ = zeros_like(dx)
for i in range(stm.dim_in):
x_p = x.copy()
x_m = x.copy()
x_p[i] += h
x_m[i] -= h
dx_[i] = (
stm.loglikelihood(x_p, y) -
stm.loglikelihood(x_m, y)) / (2. * h)
self.assertLess(max(abs(ll - stm.loglikelihood(x, y))), 1e-8)
self.assertLess(max(abs(dx_ - dx)), 1e-7)
def test_poisson(self):
stm = STM(5, 5, 3, 10, ExponentialFunction, Poisson)
# choose random parameters
stm._set_parameters(randn(*stm._parameters().shape) / 100.)
err = stm._check_gradient(
randn(stm.dim_in, 1000),
randint(2, size=[stm.dim_out, 1000]), 1e-5)
self.assertLess(err, 1e-8)
def test_pickle(self):
stm0 = STM(5, 10, 4, 21)
tmp_file = mkstemp()[1]
# store model
with open(tmp_file, 'wb') as handle:
dump({'stm': stm0}, handle)
# load model
with open(tmp_file, 'rb') as handle:
stm1 = load(handle)['stm']
# make sure parameters haven't changed
self.assertEqual(stm0.dim_in, stm1.dim_in)
self.assertEqual(stm0.dim_in_nonlinear, stm1.dim_in_nonlinear)
self.assertEqual(stm0.dim_in_linear, stm1.dim_in_linear)
self.assertEqual(stm0.num_components, stm1.num_components)
self.assertEqual(stm0.num_features, stm1.num_features)
self.assertLess(max(abs(stm0.biases - stm1.biases)), 1e-20)
self.assertLess(max(abs(stm0.weights - stm1.weights)), 1e-20)
self.assertLess(max(abs(stm0.features - stm1.features)), 1e-20)
self.assertLess(max(abs(stm0.predictors - stm1.predictors)), 1e-20)
self.assertLess(max(abs(stm0.linear_predictor - stm1.linear_predictor)), 1e-20)
if __name__ == '__main__':
unittest.main()
| mit | -6,274,521,801,595,028,000 | 25.957929 | 90 | 0.655582 | false |
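A minimal usage sketch distilled from the tests above, showing the train/sample/evaluate cycle of an STM on synthetic binary data. The dimensions, sample count and training parameters are illustrative assumptions; only constructor arguments and methods already exercised by the test suite are used.

from numpy.random import randn, randint
from cmt.models import STM

# dim_in_nonlinear, dim_in_linear, num_components, num_features (assumed values)
stm = STM(8, 4, 4, 10)

inputs = randn(stm.dim_in, 5000)
outputs = randint(2, size=[stm.dim_out, 5000])

stm.train(inputs, outputs, parameters={'max_iter': 50, 'threshold': 1e-5})

samples = stm.sample(inputs)           # binary samples from the fitted model
score = stm.evaluate(inputs, outputs)  # same measure the tests compare against a GLM
print(samples.shape, score)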
d0c-s4vage/pipless | docs/conf.py | 1 | 9492 | # -*- coding: utf-8 -*-
#
# pipless documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 2 01:39:02 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pipless'
copyright = u'2016, James "doc_s4vage" Johnson'
author = u'James "doc_s4vage" Johnson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'piplessdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pipless.tex', u'pipless Documentation',
u'James "doc\\_s4vage" Johnson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pipless', u'pipless Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pipless', u'pipless Documentation',
author, 'pipless', 'pipless - automagically uses virtual environments, installs missing packages on import, and generates frozen requirements.txt',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 6,915,282,151,394,304,000 | 31.731034 | 150 | 0.70807 | false |
tommyogden/maxwellbloch | setup.py | 1 | 2118 | """MaxwellBloch
MaxwellBloch is a Python package for solving the coupled Maxwell-Bloch equations
describing the nonlinear propagation of near-resonant light through thermal
atomic vapours.
"""
import os
import textwrap
from setuptools import setup, find_packages
import subprocess
DESCRIPTION = "A Python package for solving the Maxwell-Bloch equations."
LONG_DESCRIPTION = ("MaxwellBloch is a Python package for solving the coupled "
"Maxwell-Bloch equations describing the nonlinear propagation of "
"near-resonant light through thermal atomic vapours.")
def git_short_hash():
""" Returns the short hash of the latest git commit as a string. """
git_str = subprocess.check_output(['git', 'log', '-1',
'--format=%h']).decode('UTF-8').strip()
return git_str
version = "no_version"
if "TRAVIS_TAG" in os.environ:
# Versions are of the pattern 'vX.Y.X'
version = os.environ.get("TRAVIS_TAG", "")[1:]
elif "TRAVIS_COMMIT" in os.environ:
version = os.environ.get("TRAVIS_COMMIT", "")[:8]
else:
version = git_short_hash()
def write_version_module(version_path='maxwellbloch/version.py'):
""" Write a version module with the current version."""
# Remove if already exists
if os.path.exists(version_path):
os.remove(version_path)
version_str = textwrap.dedent("""\
# This file is generated by setup.py
VERSION = '{!s}'
""".format(version))
f = open(version_path, 'w')
try:
f.write(version_str)
finally:
f.close()
write_version_module()
setup(name='MaxwellBloch',
version=version,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='http://github.com/tommyogden/maxwellbloch',
author='Thomas P Ogden',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
package_data={'maxwellbloch.tests': ['json/*.json']},
install_requires=['qutip'],
scripts=['bin/make-mp4-fixed-frame.py',
'bin/make-mp4-fixed-frame-2-fields.py',
'bin/make-gif-ffmpeg.sh'],
zip_safe=False)
| mit | -6,282,543,788,265,770,000 | 29.695652 | 80 | 0.660057 | false |
signalfx/maestro-ng | maestro/entities.py | 1 | 35993 | # Copyright (C) 2013-2014 SignalFuse, Inc.
# Copyright (C) 2015-2019 SignalFx, Inc.
#
# Docker container orchestration utility.
import bgtunnel
import collections
import datetime
import time
import os
# Import _strptime manually to work around a thread safety issue when using
# strptime() from threads for the first time.
import _strptime # noqa
import docker
try:
from docker.errors import APIError
except ImportError:
# Fall back to <= 0.3.1 location
from docker.client import APIError
try:
from docker.types import LogConfig
except ImportError:
# Fall bock to <= 1.10 location
from docker.utils.types import LogConfig
import multiprocessing.pool
import re
import six
# For Python bug workaround
import threading
import weakref
from . import environment
from . import exceptions
from . import lifecycle
# Valid syntax for port spec definitions
_PORT_SPEC_REGEX = re.compile(r'^(?P<p1>\d+)(?:-(?P<p2>\d+))?(?:/(?P<proto>(tcp|udp)))?$') # noqa
_DEFAULT_PORT_PROTOCOL = 'tcp'
# Possible values for the restart policy type.
_VALID_RESTART_POLICIES = ['no', 'always', 'on-failure', 'unless-stopped']
class Entity:
"""Base class for named entities in the orchestrator."""
def __init__(self, name):
self._name = name
@property
def name(self):
"""Get the name of this entity."""
return self._name
def __repr__(self):
return self._name
class Ship(Entity):
"""A Ship that can host and run Containers.
Ships are hosts in the infrastructure. A Docker daemon is expected to be
running on each ship, providing control over the containers that will be
executed there.
"""
DEFAULT_DOCKER_PORT = 2375
DEFAULT_DOCKER_TLS_PORT = 2376
DEFAULT_API_VERSION = 1.18
DEFAULT_DOCKER_TIMEOUT = 5
def __init__(self, name, ip, endpoint=None, docker_port=None,
socket_path=None, api_version=None, timeout=None,
ssh_tunnel=None, tls=None, tls_verify=False,
tls_ca_cert=None, tls_cert=None, tls_key=None,
ssl_version=None):
"""Instantiate a new ship.
Args:
name (string): the name of the ship.
ip (string): the IP address of resolvable host name of the host.
docker_port (int): the port the Docker daemon listens on.
socket_path (string): the path to the unix socket the Docker
daemon listens on.
api_version (string): the API version of the Docker daemon.
ssh_tunnel (dict): configuration for SSH tunneling to the remote
Docker daemon.
"""
Entity.__init__(self, name)
self._ip = ip
self._endpoint = endpoint or ip
self._docker_port = int(
docker_port or
(self.DEFAULT_DOCKER_TLS_PORT if tls
else self.DEFAULT_DOCKER_PORT))
self._socket_path = os.path.realpath(socket_path) \
if socket_path else None
self._tunnel = None
if ssh_tunnel:
if 'user' not in ssh_tunnel:
raise exceptions.EnvironmentConfigurationException(
'Missing SSH user for ship {} tunnel configuration'.format(
self.name))
if 'key' not in ssh_tunnel:
raise exceptions.EnvironmentConfigurationException(
'Missing SSH key for ship {} tunnel configuration'.format(
self.name))
self._tunnel = bgtunnel.open(
ssh_address=self._endpoint,
ssh_user=ssh_tunnel['user'],
ssh_port=int(ssh_tunnel.get('port', 22)),
host_port=self._docker_port,
silent=True,
identity_file=ssh_tunnel['key'])
# Make sure we use https through the tunnel, if tls is enabled
proto = "https" if (tls or tls_verify) else "http"
self._backend_url = '{:s}://localhost:{:d}'.format(
proto, self._tunnel.bind_port)
# Apparently bgtunnel isn't always ready right away and this
# drastically cuts down on the timeouts
time.sleep(1)
elif self._socket_path is not None:
self._backend_url = 'unix://{:s}'.format(self._socket_path)
else:
proto = "https" if (tls or tls_verify) else "http"
self._backend_url = '{:s}://{:s}:{:d}'.format(
proto, self._endpoint, self._docker_port)
self._tls = docker.tls.TLSConfig(
verify=tls_verify,
client_cert=(tls_cert, tls_key),
ca_cert=tls_ca_cert,
ssl_version=ssl_version) if tls else None
self._backend = docker.Client(
base_url=self._backend_url,
version=str(api_version or Ship.DEFAULT_API_VERSION),
timeout=timeout or Ship.DEFAULT_DOCKER_TIMEOUT,
tls=self._tls)
@property
def ip(self):
"""Returns this ship's IP address or hostname."""
return self._ip
@property
def endpoint(self):
"""Returns this ship's Docker endpoint IP address or hostname."""
return self._endpoint
@property
def backend(self):
"""Returns the Docker client wrapper to talk to the Docker daemon on
this host."""
return self._backend
def address(self, use_ip=False):
name = self.ip if use_ip else self.name
if self._tunnel:
return '{} (ssh:{})'.format(name, self._tunnel.bind_port)
return name
def get_image_ids(self):
"""Returns a dictionary of tagged images available on the Docker daemon
running on this ship."""
images = {}
for image in self._backend.images():
tags = image.get('RepoTags', [])
            if not tags or tags == ['<none>:<none>']:
continue
for tag in image['RepoTags']:
images[tag] = image['Id']
return images
def __repr__(self):
if self._tunnel:
return '{}@{} via ssh://{}@{}:{}->{}'.format(
self.name, self._ip, self._tunnel.ssh_user,
self._endpoint, self._tunnel.bind_port, self._docker_port)
return '{}@{} via {}'.format(self.name, self._ip, self._backend_url)
class Service(Entity):
"""A Service is a collection of Containers running on one or more Ships
that constitutes a logical grouping of containers that make up an
infrastructure service.
Services may depend on each other. This dependency tree is honored when
services need to be started.
"""
def __init__(self, name, image, omit=True, env=None, envfile=None,
maestro_schema=None, maestro_env_name='local',
maestro_env_base=None, lifecycle=None, limits=None,
ports=None):
"""Instantiate a new named service/component of the platform using a
given Docker image.
By default, a service has no dependencies. Dependencies are resolved
and added once all Service objects have been instantiated.
Args:
name (string): the name of this service.
image (string): the name of the Docker image the instances of this
service should use.
omit (boolean): Whether to include this service in no-argument
commands or omit it.
env (dict): a dictionary of environment variables to use as the
base environment for all instances of this service.
envfile (string or list): filename, or list of filenames, to
environment files for all instances of this service. Explicit
environment from 'env' take precedence over the contents of
those files.
maestro_schema (dict): Maestro schema versioning information.
maestro_env_name (string): name of the Maestro environment.
maestro_env_base (string): base path of the Maestro environment.
lifecycle (dict): a dictionary of lifecycle checks configurations.
limits (dict): a dictionary of service limits.
ports (dict): a dictionary of service ports.
"""
Entity.__init__(self, name)
self._image = image
self._omit = omit
self._schema = maestro_schema
try:
self._env = environment.build(maestro_env_base, envfile, env, {
'MAESTRO_ENVIRONMENT_NAME': maestro_env_name,
'SERVICE_NAME': self.name,
})
except ValueError:
raise exceptions.EnvironmentConfigurationException(
'Invalid environment configuration for service {}'
.format(name))
self._lifecycle = lifecycle or {}
self._limits = limits or {}
self._ports = ports or {}
self._requires = set([])
self._wants_info = set([])
self._needed_for = set([])
self._containers = {}
@property
def image(self):
return self._image
@property
def omit(self):
return self._omit
@property
def env(self):
return self._env
@property
def lifecycle(self):
return self._lifecycle
@property
def limits(self):
return self._limits
@property
def ports(self):
return self._ports
@property
def dependencies(self):
return self._requires
@property
def requires(self):
"""Returns the full set of direct and indirect dependencies of this
service."""
dependencies = self._requires
for dep in dependencies:
dependencies = dependencies.union(dep.requires)
return dependencies
@property
def wants_info(self):
"""Returns the full set of "soft" dependencies this service wants
information about through link environment variables."""
return self._wants_info
@property
def needed_for(self):
"""Returns the full set of direct and indirect dependents (aka services
that depend on this service)."""
dependents = self._needed_for
for dep in dependents:
dependents = dependents.union(dep.needed_for)
return dependents
@property
def containers(self):
"""Return an ordered list of instance containers for this service, by
instance name."""
return map(lambda c: self._containers[c],
sorted(self._containers.keys()))
def add_dependency(self, service):
"""Declare that this service depends on the passed service."""
self._requires.add(service)
def add_dependent(self, service):
"""Declare that the passed service depends on this service."""
self._needed_for.add(service)
def add_wants_info(self, service):
"""Declare that this service wants information about the passed service
via link environment variables."""
self._wants_info.add(service)
def register_container(self, container):
"""Register a new instance container as part of this service."""
self._containers[container.name] = container
def get_link_variables(self, add_internal=False):
"""Return the dictionary of all link variables from each container of
this service. An additional variable, named '<service_name>_INSTANCES',
        contains the list of container/instance names of the service."""
basename = re.sub(r'[^\w]', '_', self.name).upper()
links = {}
for c in self._containers.values():
for name, value in c.get_link_variables(add_internal).items():
links['{}_{}'.format(basename, name)] = value
links['{}_INSTANCES'.format(basename)] = \
','.join(sorted(self._containers.keys()))
return links
class Container(Entity):
"""A Container represents an instance of a particular service that will be
executed inside a Docker container on its target ship/host."""
def __init__(self, ships, name, service, config=None, maestro_schema=None,
maestro_env_base=None):
"""Create a new Container object.
Args:
ships (dict): the dictionary of all defined ships in the
environment.
name (string): the instance name (should be unique).
service (Service): the Service this container is an instance of.
config (dict): the YAML-parsed dictionary containing this
instance's configuration (ports, environment, volumes, etc.)
maestro_schema (dict): Maestro schema versioning information.
maestro_env_base (string): base path of the Maestro environment.
"""
Entity.__init__(self, name)
config = config or {}
self._status = None # The container's status, cached.
self._ship = ships[config['ship']]
self._service = service
self._image = config.get('image', service.image)
self._schema = maestro_schema
# Register this instance container as being part of its parent service.
self._service.register_container(self)
# Get command
# TODO(mpetazzoni): remove deprecated 'cmd' support
self.command = config.get('command', config.get('cmd'))
# Parse the port specs.
self.ports = self._parse_ports(
dict(self.service.ports, **config.get('ports', {})))
# Gather environment variables.
try:
self.env = environment.build(
maestro_env_base, service.env,
config.get('envfile', []),
config.get('env', {}), {
# Seed the service name, container name and host address as
# part of the container's environment.
'CONTAINER_NAME': self.name,
'CONTAINER_HOST_ADDRESS': self.ship.ip,
'DOCKER_IMAGE': self.image,
'DOCKER_TAG': self.get_image_details()['tag'],
})
except ValueError:
raise exceptions.EnvironmentConfigurationException(
'Invalid environment configuration for container {}'
.format(name))
self.volumes = self._parse_volumes(config.get('volumes', {}))
self.container_volumes = config.get('container_volumes', [])
if type(self.container_volumes) != list:
self.container_volumes = [self.container_volumes]
self.container_volumes = set(self.container_volumes)
# Check for conflicts
for volume in self.volumes.values():
if volume['bind'] in self.container_volumes:
raise exceptions.InvalidVolumeConfigurationException(
'Conflict in {} between bind-mounted volume '
'and container-only volume on {}'
.format(self.name, volume['bind']))
# Contains the list of containers from which volumes should be mounted
# in this container. Host-locality and volume conflicts are checked by
# the conductor.
self.volumes_from = config.get('volumes_from', [])
if type(self.volumes_from) != list:
self.volumes_from = [self.volumes_from]
self.volumes_from = set(self.volumes_from)
# Get links
self.links = dict(
(name, alias) for name, alias in
config.get('links', {}).items())
# User name to use inside the container
self.username = config.get('username', None)
# Should this container run with -privileged?
self.privileged = config.get('privileged', False)
# Add or drop privileges
self.cap_add = config.get('cap_add', None)
self.cap_drop = config.get('cap_drop', None)
# Add extra hosts
self.extra_hosts = self._parse_extra_hosts(
ships, config.get('extra_hosts'))
# Network mode
self.network_mode = config.get('net')
# Restart policy
self.restart_policy = self._parse_restart_policy(config.get('restart'))
# DNS settings for the container, always as a list
self.dns = config.get('dns')
if isinstance(self.dns, six.string_types):
self.dns = [self.dns]
# Stop timeout
self.stop_timeout = config.get('stop_timeout', 10)
# Get limits
limits = dict(self.service.limits, **config.get('limits', {}))
self.cpu_shares = limits.get('cpu')
self.mem_limit = self._parse_bytes(limits.get('memory'))
self.memswap_limit = self._parse_bytes(limits.get('swap'))
# Get logging config.
self.log_config = self._parse_log_config(
config.get('log_driver'), config.get('log_opt'))
# Additional LXC configuration options. See the LXC documentation for a
# reference of the available settings. Those are only supported if the
# remote Docker daemon uses the lxc execution driver.
self.lxc_conf = config.get('lxc_conf', {})
# Work directory for the container
self.workdir = config.get('workdir')
# Reformat port structure
ports = collections.defaultdict(list) if self.ports else None
if ports is not None:
for port in self.ports.values():
ports[port['exposed']].append(
(port['external'][0], port['external'][1].split('/')[0]))
# Security options
self.security_opt = config.get('security_opt')
# Ulimits options
self.ulimits = self._parse_ulimits(config.get('ulimits', None))
# Container labels; may be a dictionary or a list
self.labels = config.get('labels', None)
if self.labels is not None and type(self.labels) not in [list, dict]:
raise exceptions.EnvironmentConfigurationException(
('Invalid labels configuration for container {}; '
'must be a list or mapping! ').format(self.name))
# host_config now contains all settings previously passed in container
# start().
self.host_config = self._ship.backend.create_host_config(
log_config=self.log_config,
mem_limit=self.mem_limit,
memswap_limit=self.memswap_limit,
binds=self.volumes,
port_bindings=ports,
lxc_conf=self.lxc_conf,
privileged=self.privileged,
cap_add=self.cap_add,
cap_drop=self.cap_drop,
extra_hosts=self.extra_hosts,
network_mode=self.network_mode,
restart_policy=self.restart_policy,
dns=self.dns,
links=self.links,
ulimits=self.ulimits,
volumes_from=list(self.volumes_from),
security_opt=self.security_opt)
# With everything defined, build lifecycle state helpers as configured
lifecycle = dict(self.service.lifecycle)
for state, checks in config.get('lifecycle', {}).items():
if state not in lifecycle:
lifecycle[state] = []
lifecycle[state].extend(checks)
self._lifecycle = self._parse_lifecycle(lifecycle)
@property
def ship(self):
"""Returns the Ship this container runs on."""
return self._ship
@property
def service(self):
"""Returns the Service this container is an instance of."""
return self._service
@property
def id(self):
"""Returns the ID of this container given by the Docker daemon, or None
if the container doesn't exist."""
status = self.status()
return status and status.get('ID', status.get('Id', None))
@property
def shortid(self):
"""Returns a short representation of this container's ID, or '-' if the
container is not running."""
return self.id[:7] if self.id else '-'
def is_running(self):
"""Refreshes the status of this container and tells if it's running or
not."""
status = self.status(refresh=True)
return status and status['State']['Running']
def is_down(self):
"""Returns True if this container isn't defined or is not running."""
return not self.is_running()
@property
def image(self):
"""Return the full name and tag of the image used by instances of this
service."""
return self._image
@property
def short_image(self):
"""Return the abbreviated name (stripped of its registry component,
        when present) of the image used by this container."""
return self._image[self._image.find('/')+1:]
def get_image_details(self, image=None):
"""Return a dictionary detailing the image used by this service, with
its repository name and the requested tag (defaulting to latest if not
specified)."""
image = image or self._image
p = image.rsplit(':', 1)
if len(p) > 1 and '/' in p[1]:
p[0] = image
p.pop()
return {'repository': p[0], 'tag': len(p) > 1 and p[1] or 'latest'}
@property
def shortid_and_tag(self):
"""Returns a string representing the tag of the image this container
runs on and the short ID of the running container."""
status = self.status()
image = status and status['Config']['Image']
tag = self.get_image_details(image)['tag']
return '{}:{}'.format(tag, self.shortid)
@property
def started_at(self):
"""Returns the time at which the container was started."""
status = self.status()
return status and self._parse_go_time(status['State']['StartedAt'])
@property
def finished_at(self):
"""Returns the time at which the container finished executing."""
status = self.status()
return status and self._parse_go_time(status['State']['FinishedAt'])
@property
def hostname(self):
"""Returns a hostname for the container, or None if the container has a
defined network mode."""
return self.name if not self.network_mode else None
def status(self, refresh=False):
"""Retrieve the details about this container from the Docker daemon, or
None if the container doesn't exist."""
if refresh or not self._status:
try:
self._status = self.ship.backend.inspect_container(self.name)
except APIError:
pass
return self._status
def get_volumes(self):
"""Returns all the declared local volume targets within this container.
        This does not include volumes from other containers."""
volumes = set(self.container_volumes)
for volume in self.volumes.values():
volumes.add(volume['bind'])
return volumes
def get_link_variables(self, add_internal=False):
"""Build and return a dictionary of environment variables providing
linking information to this container.
Variables are named
'<service_name>_<container_name>_{HOST,PORT,INTERNAL_PORT}'.
"""
def _to_env_var_name(n):
return re.sub(r'[^\w]', '_', n).upper()
def _port_number(p):
return p.split('/')[0]
basename = _to_env_var_name(self.name)
links = {'{}_HOST'.format(basename): self.ship.ip}
for name, spec in self.ports.items():
links['{}_{}_PORT'.format(basename, _to_env_var_name(name))] = \
_port_number(spec['external'][1])
if add_internal:
links['{}_{}_INTERNAL_PORT'.format(
basename, _to_env_var_name(name))] = \
_port_number(spec['exposed'])
return links
def start_lifecycle_checks(self, state):
"""Check if a particular lifecycle state has been reached by executing
        all its defined checks. If no checks are defined, it is assumed the
state is reached immediately."""
if state not in self._lifecycle:
# Return None to indicate no checks were performed.
return None
# HACK: Workaround for Python bug #10015 (also #14881). Fixed in
# Python >= 2.7.5 and >= 3.3.2.
thread = threading.current_thread()
if not hasattr(thread, "_children"):
thread._children = weakref.WeakKeyDictionary()
pool = multiprocessing.pool.ThreadPool()
return pool.map_async(lambda check: check.test(self),
self._lifecycle[state])
def ping_port(self, port):
"""Ping a single port, by its given name in the port mappings. Returns
        True if the port is open and accepting connections, False
otherwise."""
parts = self.ports[port]['external'][1].split('/')
if parts[1] == 'udp':
return False
return lifecycle.TCPPortPinger(self.ship.ip, int(parts[0]), 1).test()
def _parse_bytes(self, s):
if not s or not isinstance(s, six.string_types):
return s
units = {'k': 1024,
'm': 1024*1024,
'g': 1024*1024*1024}
suffix = s[-1].lower()
if suffix not in units.keys():
if not s.isdigit():
raise exceptions.EnvironmentConfigurationException(
'Unknown unit suffix {} in {} for container {}!'
.format(suffix, s, self.name))
return int(s)
return int(s[:-1]) * units[suffix]
def _parse_restart_policy(self, spec):
"""Parse the restart policy configured for this container.
Args:
spec: the restart policy specification, as extract from the YAML.
It can be a string <name>:<max-retries>, or a dictionary with the
name and retries for the restart policy.
Returns: A Docker-ready dictionary representing the parsed restart
policy.
"""
def _make_policy(name='no', retries=0):
if name not in _VALID_RESTART_POLICIES:
raise exceptions.InvalidRestartPolicyConfigurationException(
('Invalid restart policy {} for container {}; '
'choose one of {}.').format(
name, self.name,
', '.join(_VALID_RESTART_POLICIES)))
return {'Name': name, 'MaximumRetryCount': int(retries)}
try:
if isinstance(spec, six.string_types):
return _make_policy(*spec.split(':', 1))
elif type(spec) == dict:
return _make_policy(**spec)
except exceptions.InvalidRestartPolicyConfigurationException:
raise
except Exception:
raise exceptions.InvalidRestartPolicyConfigurationException(
'Invalid restart policy format for container {}: "{}"'
.format(self.name, spec))
# Fall-back to default
return _make_policy()
def _parse_volumes(self, volumes):
"""Parse the volume bindings defined by this container's configuration.
Args:
volumes (dict): the configured volume mappings as extracted from
the YAML file.
Returns: A dictionary of bindings host -> binding spec, where the
binding spec specifies the target inside the container and its mode
(read-only or read-write) in docker-py's format.
"""
result = {}
def _parse_spec(src, spec):
# Short path for obsolete schemas
# TODO(mpetazzoni): remove when obsoleted
if self._schema == 1:
result[spec] = {'bind': src, 'ro': False}
return
if isinstance(spec, six.string_types):
result[src] = {'bind': spec}
elif type(spec) == dict and 'target' in spec:
result[src] = {'bind': spec['target'],
'mode': spec.get('mode', 'rw')}
else:
raise exceptions.InvalidVolumeConfigurationException(
'Invalid volume specification for container {}: {} -> {}'
.format(self.name, src, spec))
for src, spec in volumes.items():
_parse_spec(src, spec)
return result
def _parse_log_config(self, log_driver, log_opt):
""" Parse the log config found in the container's configuration.
Args:
log_driver (enum): Should be a valid value as defined by
docker/docker-py, e.g. json-file, syslog, none.
log_opt (dict): Should be a valid dictionary with additional log
driver settings. Values are not interpreted.
Returns: A dictionary that can be passed to to docker-py via the
host_config.LogConfig variable.
"""
if log_driver:
if log_driver not in LogConfig.types._values:
raise exceptions.InvalidLogConfigurationException(
"log_driver must be one of ({0})"
.format(', '.join(LogConfig.types._values)))
if log_opt and not type(log_opt) == dict:
raise exceptions.InvalidLogConfigurationException(
"log_opt must be a dictionary")
if log_opt:
log_opt = dict((k, str(v)) for k, v in log_opt.items())
return {"Type": log_driver, "Config": log_opt}
else:
return {"Type": log_driver}
return None
def _parse_go_time(self, s):
"""Parse a time string found in the container status into a Python
datetime object.
Docker uses Go's Time.String() method to convert a UTC timestamp into a
string, but that representation isn't directly parsable from Python as
it includes nanoseconds: http://golang.org/pkg/time/#Time.String
We don't really care about sub-second precision here anyway, so we
strip it out and parse the datetime up to the second.
Args:
s (string): the time string from the container inspection
dictionary.
Returns: The corresponding Python datetime.datetime object, or None if
the time string clearly represented a non-initialized time (which
seems to be 0001-01-01T00:00:00Z in Go).
"""
if not s:
return None
t = datetime.datetime.strptime(s[:-1].split('.')[0],
'%Y-%m-%dT%H:%M:%S')
return t if t.year > 1 else None
def _parse_ports(self, ports):
"""Parse port mapping specifications for this container."""
def parse_port_spec(spec):
if type(spec) == int:
spec = str(spec)
m = _PORT_SPEC_REGEX.match(spec)
if not m:
raise exceptions.InvalidPortSpecException(
('Invalid port specification {}! '
'Expected format is <port>, <p1>-<p2> '
'or <port>/{{tcp,udp}}.').format(spec))
s = m.group('p1')
if m.group('p2'):
s += '-' + m.group('p2')
proto = m.group('proto') or _DEFAULT_PORT_PROTOCOL
s += '/' + proto
return s
result = {}
for name, spec in ports.items():
# Single number, interpreted as being a TCP port number and to be
# the same for the exposed port and external port bound on all
# interfaces.
if type(spec) == int:
result[name] = {
'exposed': parse_port_spec(spec),
'external': ('0.0.0.0', parse_port_spec(spec)),
}
# Port spec is a string. This means either a protocol was specified
# with /tcp or /udp, that a port range was specified, or that a
# mapping was provided, with each side of the mapping optionally
# specifying the protocol.
# External port is assumed to be bound on all interfaces as well.
            elif isinstance(spec, six.string_types):
parts = list(map(parse_port_spec, spec.split(':')))
if len(parts) == 1:
# If only one port number is provided, assumed external =
# exposed.
parts.append(parts[0])
elif len(parts) > 2:
raise exceptions.InvalidPortSpecException(
('Invalid port spec {} for port {} of {}! ' +
'Format should be "name: external:exposed".').format(
spec, name, self))
if parts[0][-4:] != parts[1][-4:]:
raise exceptions.InvalidPortSpecException(
'Mismatched protocols between {} and {}!'.format(
parts[0], parts[1]))
result[name] = {
'exposed': parts[0],
'external': ('0.0.0.0', parts[1]),
}
# Port spec is fully specified.
elif type(spec) == dict and \
'exposed' in spec and 'external' in spec:
spec['exposed'] = parse_port_spec(spec['exposed'])
if type(spec['external']) != list:
spec['external'] = ('0.0.0.0', spec['external'])
spec['external'] = (spec['external'][0],
parse_port_spec(spec['external'][1]))
result[name] = spec
else:
raise exceptions.InvalidPortSpecException(
'Invalid port spec {} for port {} of {}!'.format(
spec, name, self))
return result
def _parse_lifecycle(self, lifecycles):
"""Parse the lifecycle checks configured for this container and
instantiate the corresponding check helpers, as configured."""
checkers = {}
for state, checks in lifecycles.items():
if not type(checks) == list:
raise exceptions.InvalidLifecycleCheckConfigurationException(
('Invalid {} lifecycle checks configuration; '
'expected list of checks, got {}!')
.format(state, type(checks)))
checkers[state] = list(
map(lambda c: (lifecycle.LifecycleHelperFactory
.from_config(self, c)), checks))
return checkers
def _parse_ulimits(self, ulimits):
"""Parse ulimits"""
if ulimits is None:
return None
result = []
for name, value in ulimits.items():
ulimit = {'name': name}
if isinstance(value, dict):
ulimit.update(value)
elif isinstance(value, int):
ulimit.update({'hard': value, 'soft': value})
else:
continue
result.append(ulimit)
return result
def _parse_extra_hosts(self, ships, hosts):
"""Parse extra hosts that will be added to /etc/hosts."""
if not hosts:
return None
result = {}
for name, value in hosts.items():
if isinstance(value, dict):
result[name] = ships[value['ship']].ip
elif isinstance(value, six.string_types):
result[name] = value
else:
continue
return result
def __repr__(self):
return '{} (on {})'.format(self.name, self.ship.name)
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
| apache-2.0 | 8,911,041,435,713,636,000 | 36.847529 | 97 | 0.572 | false |
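A hypothetical wiring of the entities defined above, assembled directly rather than through MaestroNG's YAML conductor. The ship address, service name, port names and limit values are assumptions for illustration; the ports mapping shows the spec forms accepted by Container._parse_ports (plain number, two-part string, and the fully specified dictionary).

from maestro import entities

ships = {'host1': entities.Ship('host1', '192.168.10.2')}
service = entities.Service('web', 'nginx:latest', env={'GREETING': 'hello'})

container = entities.Container(
    ships, 'web-1', service,
    config={
        'ship': 'host1',
        'ports': {
            'http': 80,                                    # single port number
            'admin': '8081:8081/tcp',                      # two-part string form
            'metrics': {'exposed': '9000/tcp',             # fully specified form
                        'external': ['0.0.0.0', '9000/tcp']},
        },
        'limits': {'memory': '512m', 'cpu': 2},
        'restart': 'on-failure:3',
    },
    maestro_schema=2)

print(container.ports['http'])
print(container.restart_policy)
print(container.get_link_variables(add_internal=True))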
percival-detector/odin-data | tools/python/odin_data/meta_listener_adapter.py | 1 | 7328 | import logging
from tornado import escape
from odin.adapters.adapter import ApiAdapterResponse, \
request_types, response_types
from odin_data.odin_data_adapter import OdinDataAdapter
class MetaListenerAdapter(OdinDataAdapter):
"""An OdinControl adapter for a MetaListener"""
def __init__(self, **kwargs):
logging.debug("MetaListenerAdapter init called")
# These are internal adapter parameters
self.acquisitionID = ""
self.acquisition_active = False
self.acquisitions = []
# These parameters are stored under an acquisition tree, so we need to
# parse out the parameters for the acquisition we have stored
self._readback_parameters = {}
self._set_defaults()
# These config parameters are buffered so they can be included whenever a new acquisition
# is created. This helps to abstract the idea of acquisitions being created and removed and
# means the client does not need to send things in a certain order.
self._config_parameters = {
"config/output_dir": "",
"config/flush": 100,
"config/file_prefix": ""
}
# Parameters must be created before base init called
super(MetaListenerAdapter, self).__init__(**kwargs)
self._client = self._clients[0] # We only have one client
def _set_defaults(self):
self.acquisitionID = ""
self._readback_parameters = {
"status/filename": "",
"status/num_processors": 0,
"status/writing": False,
"status/written": 0
}
def _map_acquisition_parameter(self, path):
"""Map acquisition parameter path string to full uri item list"""
# Replace the first slash with acquisitions/<acquisitionID>/
# E.g. status/filename -> status/acquisitions/<acquisitionID>/filename
full_path = path.replace(
"/", "/acquisitions/{}/".format(self.acquisitionID),
1 # First slash only
)
return full_path.split("/") # Return list of uri items
@request_types('application/json')
@response_types('application/json', default='application/json')
def get(self, path, request):
"""Implementation of the HTTP GET verb for MetaListenerAdapter
:param path: URI path of the GET request
:param request: Tornado HTTP request object
:return: ApiAdapterResponse object to be returned to the client
"""
status_code = 200
response = {}
logging.debug("GET path: %s", path)
logging.debug("GET request: %s", request)
if path == "config/acquisition_id":
response["value"] = self.acquisitionID
elif path == "status/acquisition_active":
response["value"] = self.acquisition_active
elif path == "config/acquisitions":
acquisition_tree = self.traverse_parameters(
self._clients[0].parameters,
["config", "acquisitions"]
)
if acquisition_tree is not None:
response["value"] = "," .join(acquisition_tree.keys())
else:
response["value"] = None
elif path in self._readback_parameters:
response["value"] = self._readback_parameters[path]
elif path in self._config_parameters:
response["value"] = self._config_parameters[path]
else:
return super(MetaListenerAdapter, self).get(path, request)
return ApiAdapterResponse(response, status_code=status_code)
@request_types('application/json')
@response_types('application/json', default='application/json')
def put(self, path, request):
"""
Implementation of the HTTP PUT verb for MetaListenerAdapter
:param path: URI path of the PUT request
:param request: Tornado HTTP request object
:return: ApiAdapterResponse object to be returned to the client
"""
logging.debug("PUT path: %s", path)
logging.debug("PUT request: %s", request)
logging.debug("PUT request.body: %s",
str(escape.url_unescape(request.body)))
value = str(escape.url_unescape(request.body)).replace('"', '')
if path == "config/acquisition_id":
self.acquisitionID = value
# Set inactive so process_updates doesn't clear acquisition ID
self.acquisition_active = False
# Send entire config with new acquisition ID
config = dict(
acquisition_id=self.acquisitionID,
output_dir=self._config_parameters["config/output_dir"],
flush=self._config_parameters["config/flush"],
file_prefix=self._config_parameters["config/file_prefix"]
)
status_code, response = self._send_config(config)
elif path == "config/stop":
self.acquisition_active = False
# By default we stop all acquisitions by passing None
config = {
"acquisition_id": None,
"stop": True
}
if self.acquisitionID:
# If we have an Acquisition ID then stop that one only
config["acquisition_id"] = self.acquisitionID
status_code, response = self._send_config(config)
self.acquisitionID = ""
elif path in self._config_parameters:
# Store config to re-send with acquisition ID when it is changed
self._config_parameters[path] = value
parameter = path.split("/", 1)[-1] # Remove 'config/'
config = {
"acquisition_id": self.acquisitionID,
parameter: value
}
status_code, response = self._send_config(config)
else:
return super(OdinDataAdapter, self).put(path, request)
return ApiAdapterResponse(response, status_code=status_code)
def _send_config(self, config_message):
status_code = 200
response = {}
try:
self._client.send_configuration(config_message)
except Exception as err:
logging.debug(OdinDataAdapter.ERROR_FAILED_TO_SEND)
logging.error("Error: %s", err)
status_code = 503
response = {"error": OdinDataAdapter.ERROR_FAILED_TO_SEND}
return status_code, response
def process_updates(self):
"""Handle additional background update loop tasks
Store a copy of all parameters so they don't disappear
"""
if self.acquisitionID:
acquisition_active = self.acquisitionID in self.traverse_parameters(
self._client.parameters, ["status", "acquisitions"]
)
if acquisition_active:
self.acquisition_active = True
for parameter in self._readback_parameters.keys():
value = self.traverse_parameters(
self._client.parameters,
self._map_acquisition_parameter(parameter)
)
self._readback_parameters[parameter] = value
else:
self.acquisition_active = False
else:
self._set_defaults()
| apache-2.0 | 4,103,719,632,012,855,000 | 37.978723 | 99 | 0.593068 | false |
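A hypothetical client-side exchange with an odin-control server hosting this adapter, illustrating that configuration sent before an acquisition ID is buffered and re-sent once the ID arrives. The server URL, adapter mount point and parameter values are assumptions; only paths handled by the get/put methods above are used.

import requests

base = 'http://localhost:8888/api/0.1/meta_listener'   # assumed endpoint
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

# Buffered by the adapter until an acquisition ID is supplied.
requests.put(base + '/config/output_dir', data='"/tmp/metadata"', headers=headers)
requests.put(base + '/config/file_prefix', data='"scan_001"', headers=headers)

# Setting the acquisition ID triggers the full buffered configuration send.
requests.put(base + '/config/acquisition_id', data='"acq-42"', headers=headers)

print(requests.get(base + '/status/acquisition_active', headers=headers).json())
print(requests.get(base + '/config/acquisitions', headers=headers).json())

# Stop the current acquisition and clear the stored acquisition ID.
requests.put(base + '/config/stop', data='true', headers=headers)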
Kronuz/pyXapiand | xapiand/search.py | 1 | 11782 | from __future__ import unicode_literals, absolute_import
import base64
import logging
import xapian
from . import json
from .core import get_slot, get_prefix, expand_terms, find_terms, DOCUMENT_CUSTOM_TERM_PREFIX
from .serialise import normalize, serialise_value
from .exceptions import XapianError
MAX_DOCS = 10000
class Search(object):
def __init__(self, database, search,
get_matches=True, get_data=True, get_terms=False, get_size=False,
data='.', log=logging, dead=False):
self.database = database
self.search = search
self.get_matches = get_matches
self.get_terms = get_terms
self.get_data = get_data
self.get_size = get_size
self.data = data
self.log = log
self.dead = dead
self.spies = {}
self.warnings = []
self.produced = 0
self.size = None
self.facets = self.search.get('facets')
self.check_at_least = self.search.get('check_at_least', MAX_DOCS if self.facets else 0)
self.maxitems = self.search.get('maxitems', MAX_DOCS)
self.first = self.search.get('first', 0)
self.setup()
def setup(self):
queryparser = xapian.QueryParser()
queryparser.set_database(self.database.database)
query = None
prefixes = set()
def add_prefixes(string):
for term, term_field, terms in find_terms(string):
if term_field and term_field not in prefixes:
prefix = get_prefix(term_field, DOCUMENT_CUSTOM_TERM_PREFIX)
if term_field.lower() == term_field:
queryparser.add_prefix(term_field, prefix)
else:
queryparser.add_boolean_prefix(term_field, prefix)
prefixes.add(term_field)
# Build final query:
search = self.search.get('search')
if search:
if not isinstance(search, (tuple, list)):
search = [search]
search = " AND ".join("(%s)" % s for s in search if s)
if search and search != '(*)':
search = normalize(search).encode('utf-8')
ranges = self.search.get('ranges')
if ranges:
_ranges = set()
for field, begin, end in ranges:
field = field.encode('utf-8')
if field not in _ranges:
slot = get_slot(field)
vrp = xapian.StringValueRangeProcessor(slot, field)
queryparser.add_valuerangeprocessor(vrp)
_ranges.add(field)
if begin is None:
begin = b''
if end is None:
end = b''
rng1 = b'(%s:%s..%s)' % (field, begin, end)
rng2 = b'(%s:%s..%s)' % (field, serialise_value(begin)[0], serialise_value(end)[0])
if rng1 == rng2:
_search = search
if rng1 in search:
search = None
else:
_search = search.replace(rng1, rng2)
if search != _search:
search = _search
else:
search += b' AND %s' % rng2
search = expand_terms(search)
add_prefixes(search)
flags = xapian.QueryParser.FLAG_DEFAULT | xapian.QueryParser.FLAG_WILDCARD | xapian.QueryParser.FLAG_PURE_NOT
try:
query = queryparser.parse_query(search, flags)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
queryparser.set_database(self.database.database)
query = queryparser.parse_query(search, flags)
partials = self.search.get('partials')
if partials:
if not isinstance(partials, (tuple, list)):
partials = [partials]
# Partials (for autocomplete) using FLAG_PARTIAL and OP_AND_MAYBE
partials_query = None
for partial in partials:
self.dead or 'alive' # Raises DeadException when needed
partial = normalize(partial)
partial = expand_terms(partial)
add_prefixes(partial)
flags = xapian.QueryParser.FLAG_PARTIAL
try:
_partials_query = queryparser.parse_query(partial, flags)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
queryparser.set_database(self.database.database)
_partials_query = queryparser.parse_query(partial, flags)
if partials_query:
partials_query = xapian.Query(
xapian.Query.OP_AND_MAYBE,
partials_query,
_partials_query,
)
else:
partials_query = _partials_query
if query:
query = xapian.Query(
xapian.Query.OP_AND,
query,
partials_query,
)
else:
query = partials_query
terms = self.search.get('terms')
if terms:
if not isinstance(terms, (tuple, list)):
terms = [terms]
for term in terms:
term = normalize(term)
term = expand_terms(term)
add_prefixes(term)
flags = xapian.QueryParser.FLAG_BOOLEAN | xapian.QueryParser.FLAG_PURE_NOT
try:
terms_query = queryparser.parse_query(term, flags)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
queryparser.set_database(self.database.database)
terms_query = queryparser.parse_query(term, flags)
if query:
query = xapian.Query(
xapian.Query.OP_AND,
query,
terms_query,
)
else:
query = terms_query
if not query:
if search == '(*)':
query = xapian.Query('')
else:
query = xapian.Query()
self.query = query
self.sort_by = self.search.get('sort_by')
self.distinct = self.search.get('distinct')
self.sort_by_reversed = self.search.get('sort_by_reversed')
def get_enquire(self):
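        # Build the Enquire object: attach a ValueCountMatchSpy per requested
        # facet, optional multi-value sort keys, and a collapse key when
        # 'distinct' is requested; unknown value names are recorded as warnings.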
enquire = xapian.Enquire(self.database.database)
# enquire.set_weighting_scheme(xapian.BoolWeight())
# enquire.set_docid_order(xapian.Enquire.DONT_CARE)
# if weighting_scheme:
# enquire.set_weighting_scheme(xapian.BM25Weight(*self.weighting_scheme))
enquire.set_query(self.query)
spies = {}
sort_by = []
warnings = []
if self.facets:
for name in self.facets:
self.dead or 'alive' # Raises DeadException when needed
name = name.strip().lower()
slot = get_slot(name)
if slot:
spy = xapian.ValueCountMatchSpy(slot)
enquire.add_matchspy(spy)
spies[name] = spy
else:
warnings.append("Ignored document value name (%r)" % name)
if self.sort_by:
for sort_field in self.sort_by:
self.dead or 'alive' # Raises DeadException when needed
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:] # Strip the '-'
else:
reverse = False
sort_by.append((sort_field, reverse))
sorter = xapian.MultiValueKeyMaker()
for name, reverse in sort_by:
self.dead or 'alive' # Raises DeadException when needed
name = name.strip().lower()
slot = get_slot(name)
if slot:
sorter.add_value(slot, reverse)
else:
warnings.append("Ignored document value name (%r)" % name)
enquire.set_sort_by_key_then_relevance(sorter, self.sort_by_reversed)
if self.distinct:
if self.distinct is True:
field = 'ID'
else:
field = self.distinct
enquire.set_collapse_key(get_slot(field))
self.spies = spies
self.warnings = warnings
return enquire
def get_results(self):
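        # Run the query, retrying once after a database reopen on
        # network/database errors, then yield a size record (when requested),
        # one record per facet value collected by the spies, and finally one
        # dict per match.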
doccount = self.database.get_doccount()
maxitems = max(min(self.maxitems, doccount - self.first, MAX_DOCS), 0)
check_at_least = max(min(self.check_at_least, doccount, MAX_DOCS), 0)
if not self.get_matches:
maxitems = 0
try:
enquire = self.get_enquire()
matches = enquire.get_mset(self.first, maxitems, check_at_least)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
try:
enquire = self.get_enquire()
matches = enquire.get_mset(self.first, maxitems, check_at_least)
except (xapian.NetworkError, xapian.DatabaseError) as exc:
raise XapianError(exc)
self.produced = 0
self.estimated = None
self.size = matches.size()
if self.get_size:
self.estimated = matches.get_matches_estimated()
yield {
'size': self.size,
'estimated': self.estimated,
}
if self.spies:
for name, spy in self.spies.items():
self.dead or 'alive' # Raises DeadException when needed
for facet in spy.values():
self.dead or 'alive' # Raises DeadException when needed
yield {
'facet': name,
'term': facet.term.decode('utf-8'),
'termfreq': facet.termfreq,
}
produced = 0
for match in matches:
docid = match.docid
document = self.database.get_document(docid)
self.dead or 'alive' # Raises DeadException when needed
id = self.database.get_value(document, get_slot('ID'))
produced += 1
result = {
'id': id,
'docid': docid,
'rank': match.rank,
'weight': match.weight,
'percent': match.percent,
}
if self.get_data:
data = self.database.get_data(document)
if data is None:
continue
try:
data = json.loads(data)
except Exception:
data = base64.b64encode(data)
result.update({
'data': data,
})
if self.get_terms:
terms = []
termlist = self.database.get_termlist(document)
for t in termlist:
self.dead or 'alive' # Raises DeadException when needed
terms.append(t.term.decode('utf-8'))
result.update({
'terms': terms,
})
yield result
self.produced = produced
@property
def results(self):
return self.get_results()
| gpl-2.0 | 5,752,482,212,671,406,000 | 36.28481 | 121 | 0.496605 | false |
luigiberrettini/Kiln-to-GitHub | kiln_repo_list.py | 1 | 1422 | import requests
from anvil import Anvil
def main():
requests.packages.urllib3.disable_warnings()
in_file = open("./kiln_base_url.txt", "r")
base_url = in_file.read().replace('\n', '')
in_file.close()
anvil = Anvil(base_url, False)
anvil.create_session_by_prompting()
repo_indices = set()
out_file = open("./kiln_repoList.txt", "w")
for project in anvil.get_projects():
for repo_group in project.repo_groups:
for repo in repo_group.repos:
                if repo.index not in repo_indices:
repo_indices.add(repo.index)
prj_indx = str(project.index)
grp_indx = str(repo_group.index)
rep_indx = str(repo.index)
prj_name = project.name
grp_name = repo_group.name
rep_name = repo.name
prj_slug = repo.project_slug
grp_slug = repo.group_slug or 'Group'
rep_slug = repo.slug
url = base_url + '/Code/' + prj_slug + '/' + grp_slug + '/' + rep_slug
indexes = prj_indx + ',' + grp_indx + ',' + rep_indx
names = prj_name + ',' + grp_name + ',' + rep_name
out_file.write(url + "," + indexes + "," + names + ',' + rep_name + "\n")
out_file.close()
if __name__ == '__main__':
main()
| mit | 3,321,808,101,163,100,700 | 33.682927 | 93 | 0.489451 | false |
AlexandreDecan/multiwords | rims.py | 1 | 1223 | #!/usr/bin/python
"""
Module that provides several functions to handle (un)rimmed words.
"""
import words
def rims_of(word):
""" Return the rims for the given word. A rim is a (nonempty) word u such
that w = u.s = p.u' for some s,p,u' such that |u'| = |u|, and u' and u
agree on every position except one.
For example, a and aab are rims for aabb. """
rims = []
for shift in words.get_koverlaps(word, 1):
rims.append(word[:len(word)-shift])
return rims
def pretty_print(word, rims = None):
""" Pretty print of the rims of the given word. """
if rims == None:
rims = rims_of(word)
print word
for r in rims:
print word.rjust(len(word)*2-len(r), ' ')
def mismatch_pos(word, rim):
""" Return the position (in the rim) of the mismatch between
the word and the rim. Position starts at 0. """
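	# Using the module's own example ("a" and "aab" are rims of "aabb"):
	# mismatch_pos("aabb", "aab") == 1.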
shift = len(word) - len(rim)
for k in range(len(rim)):
if word[shift + k] != rim[k]:
return k
return -1
def rim_index(word, rim):
""" Return the index of a rim in the given word. The index of a rim
is a position in w where the corresponding suffix of the rim starts. """
return len(word) - len(rim) + 1
| apache-2.0 | -7,824,436,984,240,739,000 | 29.575 | 78 | 0.609975 | false |
cemarchi/biosphere | Src/BioDataManagement/DataAccess/Repositories/DnaMethylationSampleRepository.py | 1 | 1967 | from typing import List, Dict
from Src.BioDataManagement.CrossCutting.Contracts.DnaMethylationSampleRepositoryBase import \
DnaMethylationSampleRepositoryBase
from Src.BioDataManagement.CrossCutting.DTOs.DnaMethylationSampleDto import DnaMethylationSampleDto
from Src.BioDataManagement.CrossCutting.Filters import FeListDnaMethylationSample
from Src.BioDataManagement.DataAccess.Entities.DnaMethylationSample import DnaMethylationSample
from Src.BioDataManagement.DataAccess.Mappers.Mapper import Mapper
from Src.Core.Data.MongoRepositoryActions import MongoRepositoryActions
class DnaMethylationSampleRepository(DnaMethylationSampleRepositoryBase):
"""description of class"""
def __init__(self, db):
"""
:param db:
"""
super().__init__(db)
self.__mongo_actions = MongoRepositoryActions(self._collection, Mapper.get_instance())
def add_many(self, dna_methylation_samples: List[DnaMethylationSampleDto]):
"""
:param dna_methylation_samples:
"""
self.__mongo_actions.add_many(dna_methylation_samples, DnaMethylationSample)
def get_many(self, fe_dna_methylation_sample: FeListDnaMethylationSample, dto_class = None,
include_or_exclude_fields: Dict[str, int] = None) -> FeListDnaMethylationSample:
"""
:param fe_dna_methylation_sample:
:param dto_class:
:param include_or_exclude_fields:
:return:
"""
query = {} if not fe_dna_methylation_sample.patient_id_list \
else {'patient_id': {'$in': fe_dna_methylation_sample.patient_id_list}}
return self.__mongo_actions.get_many(query,
fe_dna_methylation_sample,
DnaMethylationSample,
dto_class,
include_or_exclude_fields)
| bsd-3-clause | -3,809,271,948,774,501,400 | 41.76087 | 99 | 0.646162 | false |
xiaohan2012/snpp | tests/test_signed_graph.py | 1 | 1749 | """
For the utilities
"""
import contexts as ctx
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix_csr
from snpp.utils.signed_graph import symmetric_stat, \
fill_diagonal, \
make_symmetric, \
matrix2graph
def test_symmetric_stat(Q1_d):
c_sym, c_consis = symmetric_stat(Q1_d)
assert c_sym == 6
assert c_consis == 4
def test_fill_diagonal():
N = 2
m = csr_matrix(np.array([[1, 0], [0, 0]]))
assert len(set([m[i, i] for i in range(N)])) == 2
m_new = fill_diagonal(m)
assert isspmatrix_csr(m_new)
assert set([m_new[i, i] for i in range(N)]) == {1}
def test_make_symmetric(Q1_d):
def mask(m):
"""remove inconsistent entries
"""
inconsis_idx = [(i, j)
for i, j in zip(*m.nonzero())
if (m[i, j] != 0
and m[j, i] != 0
and m[j, i] != m[i, j])]
m_masked = m.copy()
for i, j in inconsis_idx:
m_masked[i, j] = m_masked[j, i] = 0
return m_masked
Q1_d_masked = mask(Q1_d)
assert not np.allclose(Q1_d_masked.toarray(), np.transpose(Q1_d_masked.toarray()))
m = make_symmetric(Q1_d)
assert isspmatrix_csr(m)
m = m.toarray()
m_masked = mask(m)
assert np.allclose(m_masked, np.transpose(m_masked))
def test_matrix2graph(Q1_d):
gm = matrix2graph(Q1_d, None, multigraph=True)
g = matrix2graph(Q1_d, None, multigraph=False)
for i, j in gm.edges():
s = g[i][j]['sign']
assert gm[i][j][s]['sign'] == s
assert gm[0][0][1]['sign'] == g[0][0]['sign'] == 1
assert gm[2][3][1]['sign'] == g[2][3]['sign'] == 1
assert gm[0][2][-1]['sign'] == g[0][2]['sign'] == -1
| mit | 6,050,825,672,574,178,000 | 27.672131 | 86 | 0.532876 | false |
sandeez/lino-book_locator | locate/locate/lib/books/models.py | 1 | 4811 | from lino.api import dd
from django.db import models
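# Model hierarchy: Floor -> Room -> Bookshelf -> Rack -> Slot; a Book carries
# its BookInfo and is placed in a single Slot through BookLocation.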
class Floor(dd.Model):
number = models.IntegerField('Number', null=False)
def __unicode__(self):
return 'Floor: {0}'.format(self.number)
class Meta:
verbose_name = 'Floor'
verbose_name_plural = 'Floors'
class Room(dd.Model):
number = models.IntegerField('Number', null=False)
name = models.CharField('Name', max_length=10)
floor = models.ForeignKey(Floor)
def __unicode__(self):
return 'Floor: {0} -> Room: {1}'.format(
self.floor.number,
self.number)
class Meta:
verbose_name = 'Room'
verbose_name_plural = 'Rooms'
unique_together = ('number', 'floor')
class Bookshelf(dd.Model):
code = models.CharField('Code', null=False, max_length=5)
room = models.ForeignKey(Room)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2}'.format(
self.room.floor.number,
self.room.number,
self.code)
class Meta:
verbose_name = 'Bookshelf'
verbose_name_plural = 'Bookshelves'
unique_together = ('code', 'room')
class Rack(dd.Model):
code = models.CharField('Code', max_length=5, null=False)
bookshelf = models.ForeignKey(Bookshelf)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2} -> Rack: {3}'.format(
self.bookshelf.room.floor.number,
self.bookshelf.room.number,
self.bookshelf.code,
self.code)
class Meta:
verbose_name = 'Rack'
verbose_name_plural = 'Racks'
unique_together = ('code', 'bookshelf')
class Slot(dd.Model):
number = models.IntegerField('Number', null=False)
rack = models.ForeignKey(Rack)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2} -> Rack: {3} -> Slot: {4}'.format(
self.rack.bookshelf.room.floor.number,
self.rack.bookshelf.room.number,
self.rack.bookshelf.code,
self.rack.code,
self.number)
class Meta:
verbose_name = 'Slot'
verbose_name_plural = 'Slots'
unique_together = ('number', 'rack')
class Category(dd.Model):
name = models.CharField(null=False, max_length=50)
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'Category'
verbose_name_plural = 'Categories'
class Author(dd.Model):
name = models.CharField(null=False, max_length=50)
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'Author'
verbose_name_plural = 'Authors'
class Publication(dd.Model):
name = models.CharField(null=False, max_length=50)
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'Publication'
verbose_name_plural = 'Publications'
class BookInfo(dd.Model):
name = models.CharField('Name', max_length=50)
author = models.ForeignKey(Author)
publication = models.ForeignKey(Publication)
category = models.ForeignKey(Category)
copies = models.IntegerField('Total Copies', null=False, default=1)
def __unicode__(self):
return 'Name: {0} -> Author: {1} -> Publication: {2}'.format(
self.name,
self.author,
self.publication)
class Meta:
verbose_name = 'Book Information'
verbose_name_plural = 'Books Information'
unique_together = ('name', 'author', 'publication')
class Book(dd.Model):
code = models.CharField(max_length=10, unique=True)
info = models.ForeignKey(BookInfo)
def __unicode__(self):
return 'Code: {0} -> Name: {1} -> Author: {2}'.format(
self.code,
self.info.name,
self.info.author)
class Meta:
verbose_name = 'Book'
verbose_name_plural = 'Books'
unique_together = ('code', 'info')
class BookLocation(dd.Model):
book = models.ForeignKey(Book, unique=True)
slot = models.ForeignKey(Slot, unique=True)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2} -> Rack:{3} -> Slot: {4} -> Book: {5}'.format(
self.slot.rack.bookshelf.room.floor.number,
self.slot.rack.bookshelf.room.number,
self.slot.rack.bookshelf.code,
self.slot.rack.code,
self.slot.number,
self.book.code)
class Meta:
verbose_name = 'Book Location'
verbose_name_plural = 'Book Locations'
from .ui import *
| bsd-2-clause | 1,497,361,167,193,368,000 | 27.467456 | 104 | 0.561214 | false |
ryos36/polyphony-tutorial | Life/life_one.py | 1 | 2840 | import polyphony
from polyphony import module, pure
from polyphony import testbench
from polyphony.io import Port
from polyphony.typing import bit, uint3, uint4, List
from polyphony.timing import clksleep, clkfence, wait_rising, wait_falling
@module
class life:
def __init__(self):
self.i_bit4 = Port(uint4, 'in', protocol='valid')
self.o_bit = Port(bit, 'out', protocol='valid')
self.append_worker(self.life_worker, self.i_bit4, self.o_bit)
def life_worker(self, i_bit4, o_bit):
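        # Lookup tables (reading of the values below): bit3_to_n is the
        # popcount of a 3-bit column, bit3_to_m the popcount of its two outer
        # bits (centre excluded), and n_to_o maps the accumulated neighbour
        # count to the output bit (1 when the count is 2 or 3).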
bit3_to_n = [ 0, 1, 1, 2, 1, 2, 2, 3 ]
bit3_to_m = [ 0, 1, 0, 1, 1, 2, 1, 2 ]
n_to_o = [0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]
        mat = [0] * 3  # three-slot pipeline of partial neighbour counts
while polyphony.is_worker_running():
v = i_bit4()
#print("mat", mat)
#print("v", v)
if v == 8 :
mat2_old = mat[2]
mat[0] = 0
mat[1] = 0
mat[2] = 0
else:
v0 = bit3_to_n[v]
v1 = bit3_to_m[v]
mat0_old = mat[0]
mat1_old = mat[1]
mat2_old = mat[2]
mat[0] = 16 + v0
mat[1] = mat0_old + v1
mat[2] = mat1_old + v0
#print("mat2_old:", mat2_old)
if (mat2_old & 16) == 16 :
out_v = n_to_o[mat2_old & 15]
o_bit.wr(out_v)
m = life()
@testbench
def test(m):
m.i_bit4.wr(0)
clksleep(5)
m.i_bit4.wr(0)
clksleep(5)
m.i_bit4.wr(1)
v = m.o_bit.rd()
clksleep(5)
if 1 :
m.i_bit4.wr(0)
clksleep(5)
print("outv:", v)
if 0:
m.i_bit4.wr(0)
clksleep(5)
v = m.o_bit.rd()
print("outv:", v)
if 0 :
m.i_bit4.wr(4)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(3)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(8)
v = m.o_bit.rd()
print("outv:", v)
print("-")
clksleep(10)
#
m.i_bit4.wr(0)
m.i_bit4.wr(0)
m.i_bit4.wr(2)
m.i_bit4.wr(1)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(1)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(1)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(7)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(8)
v = m.o_bit.rd()
print("outv:", v)
test(m)
| mit | 7,964,215,205,951,401,000 | 21.362205 | 74 | 0.415493 | false |
bytedance/fedlearner | web_console_v2/api/test/fedlearner_webconsole/scheduler/workflow_commit_test.py | 1 | 5229 | # Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import time
import unittest
from google.protobuf.json_format import ParseDict
from unittest.mock import patch
from testing.common import BaseTestCase
from fedlearner_webconsole.db import db
from fedlearner_webconsole.job.models import JobState
from fedlearner_webconsole.project.models import Project
from fedlearner_webconsole.workflow.models import Workflow, WorkflowState
from fedlearner_webconsole.scheduler.transaction import TransactionState
from fedlearner_webconsole.scheduler.scheduler import \
scheduler
from fedlearner_webconsole.proto import project_pb2
from workflow_template_test import make_workflow_template
class WorkflowsCommitTest(BaseTestCase):
class Config(BaseTestCase.Config):
START_GRPC_SERVER = False
START_SCHEDULER = True
@classmethod
def setUpClass(self):
os.environ['FEDLEARNER_WEBCONSOLE_POLLING_INTERVAL'] = '1'
def setUp(self):
super().setUp()
# Inserts project
config = {
'participants': [{
'name': 'party_leader',
'url': '127.0.0.1:5000',
'domain_name': 'fl-leader.com',
'grpc_spec': {
'authority': 'fl-leader.com'
}
}],
'variables': [{
'name': 'namespace',
'value': 'leader'
}, {
'name': 'basic_envs',
'value': '{}'
}, {
'name': 'storage_root_dir',
'value': '/'
}, {
'name': 'EGRESS_URL',
'value': '127.0.0.1:1991'
}]
}
project = Project(
name='test',
config=ParseDict(config,
project_pb2.Project()).SerializeToString())
db.session.add(project)
db.session.commit()
@staticmethod
def _wait_until(cond, retry_times: int = 5):
for _ in range(retry_times):
time.sleep(5)
db.session.expire_all()
if cond():
return
def test_workflow_commit(self):
# test the committing stage for workflow creating
workflow_def = make_workflow_template()
workflow = Workflow(
id=20,
name='job_test1',
comment='这是一个测试工作流',
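            # (the Chinese comment above means "this is a test workflow")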
config=workflow_def.SerializeToString(),
project_id=1,
forkable=True,
state=WorkflowState.NEW,
target_state=WorkflowState.READY,
transaction_state=TransactionState.PARTICIPANT_COMMITTING)
db.session.add(workflow)
db.session.commit()
scheduler.wakeup(20)
self._wait_until(
lambda: Workflow.query.get(20).state == WorkflowState.READY)
workflow = Workflow.query.get(20)
self.assertEqual(len(workflow.get_jobs()), 2)
self.assertEqual(workflow.get_jobs()[0].state, JobState.NEW)
self.assertEqual(workflow.get_jobs()[1].state, JobState.NEW)
# test the committing stage for workflow running
workflow.target_state = WorkflowState.RUNNING
workflow.transaction_state = TransactionState.PARTICIPANT_COMMITTING
db.session.commit()
scheduler.wakeup(20)
self._wait_until(
lambda: Workflow.query.get(20).state == WorkflowState.RUNNING)
workflow = Workflow.query.get(20)
self._wait_until(
lambda: workflow.get_jobs()[0].state == JobState.STARTED)
self.assertEqual(workflow.get_jobs()[1].state, JobState.WAITING)
workflow = Workflow.query.get(20)
for job in workflow.owned_jobs:
job.state = JobState.COMPLETED
self.assertEqual(workflow.to_dict()['state'], 'COMPLETED')
workflow.get_jobs()[0].state = JobState.FAILED
self.assertEqual(workflow.to_dict()['state'], 'FAILED')
# test the committing stage for workflow stopping
workflow.target_state = WorkflowState.STOPPED
workflow.transaction_state = TransactionState.PARTICIPANT_COMMITTING
for job in workflow.owned_jobs:
job.state = JobState.STARTED
db.session.commit()
scheduler.wakeup(20)
self._wait_until(
lambda: Workflow.query.get(20).state == WorkflowState.STOPPED)
workflow = Workflow.query.get(20)
self._wait_until(
lambda: workflow.get_jobs()[0].state == JobState.STOPPED)
self.assertEqual(workflow.get_jobs()[1].state, JobState.STOPPED)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,008,619,520,266,837,200 | 36.489209 | 76 | 0.617156 | false |
xmxjq/phoneyc-SjtuISADTeam | phoneyc.py | 1 | 6820 | #!/usr/bin/env python
"""
Synopsis:
PHoneyC: Pure python honeyclient implementation.
Usage:
python phoneyc.py [ options ] url
Options:
-h , --help Display this help information.
-l <filename> , --logfile=<filename> Output file name for logs.
-v , --verbose Explain what is being done (DEBUG mode).
-d <debuglevel> , --debug=<debuglevel> Debug Level, 1-10.
    -r              , --retrieval-all              Retrieve all inline linking data.
-c , --cache-response Cache the responses from the remote sites.
-u <personality>, --user-agent=<personality> Select a user agent (see below for values, default: 2)
    -n              , --replace-nonascii           Replace all non-ASCII characters with spaces (0x20) in all HTML or JS contents
-m , --universal-activex Enable Universal ActiveX object
"""
import sys, os, shutil
import pycurl
import hashlib
import site
import getopt
from binascii import hexlify
site.addsitedir('lib/python')
import config
import magic
DOWNLOADS_DIR = "log/downloads"
BINARIES_DIR = "%s/binaries" % (DOWNLOADS_DIR, )
PDF_DIR = "%s/pdf" % (DOWNLOADS_DIR, )
APPLET_DIR = "%s/applet" % (DOWNLOADS_DIR, )
MISC_DIR = "%s/misc" % (DOWNLOADS_DIR, )
LOGDIRS = (BINARIES_DIR,
PDF_DIR,
APPLET_DIR,
MISC_DIR)
DOWNLOADS_STR = ["data", ]
USAGE_TEXT = __doc__
def usage():
print USAGE_TEXT
print "User Agents:"
for ua in config.UserAgents:
print " [%2d] %s" % (ua[0], ua[1], )
print ""
sys.exit(1)
def check_logdirs():
for logdir in LOGDIRS:
if not os.access(logdir, os.F_OK):
try:
os.makedirs(logdir)
except OSError:
pass
def download(url):
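    # Fetch the URL with pycurl using the configured user agent, write it to a
    # file named after the MD5 of the URL, then rename it to the MD5 of the
    # downloaded content; 404 responses and empty files are discarded.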
f = hashlib.md5()
f.update(url)
filename = "%s/%s" % (BINARIES_DIR, f.hexdigest(), )
fd = open(filename, 'wb')
ua = config.userAgent
c = pycurl.Curl()
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(pycurl.URL, str(url))
c.setopt(pycurl.WRITEDATA, fd)
c.setopt(pycurl.USERAGENT, ua)
try:
c.perform()
code = c.getinfo(pycurl.HTTP_CODE)
if code == 404:
config.VERBOSE(config.VERBOSE_DEBUG,
"[DEBUG] 404 File Not Found: "+url)
fd.close()
os.remove(filename)
return
except:
import traceback
traceback.print_exc(file = sys.stderr)
sys.stderr.flush()
c.close()
fd.close()
statinfo = os.stat(filename)
if not statinfo.st_size:
os.remove(filename)
return
fd = open(filename, 'r')
h = hashlib.md5()
h.update(fd.read())
newfilename = "%s/%s" % (BINARIES_DIR, h.hexdigest(), )
shutil.move(filename, newfilename)
fd.close()
def report(alerts):
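    # Print shellcode/heapspray alerts; when an emulated shellcode calls
    # URLDownloadToFile, download the referenced file as well.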
for alert in alerts:
print "\n===================================="
if alert.atype == "ALERT_SHELLCODE":
print "|--------AID:" + str(alert.aid) + "----------"
print "|ATYPE:" + str(alert.atype)
print "|MESSAGE:" + str(alert.msg)
print "|MISC:" + str(alert.misc)
print "|LENGTH:" + str(len(alert.shellcode))
print "|SHELLCODE:"
print hexlify(alert.shellcode)
print "|Now run it:"
shellcoderesult = alert.run_shellcode()
print str(shellcoderesult)
for item in shellcoderesult:
if item['name'] == 'URLDownloadToFile':
url = item['arguments'][1][2][2]
print "Downloading from URL: %s" % url
download(url)
if alert.atype == "ALERT_HEAPSPRAY" and alert.entropy < 1:
print "|--------AID:" + str(alert.aid) + "----------"
print "|ATYPE:" + str(alert.atype)
print "|MESSAGE:" + str(alert.msg)
print "|HIT:" + str(alert.hit)
print "|MEMUSAGE:" + str(alert.memusage)
print "|LENGTH:" + str(alert.length)
print "|ENTROPY:" + str(alert.entropy)
print "|MISC:" + str(alert.misc)
if __name__ == "__main__":
args = sys.argv[1:]
try:
options, args = getopt.getopt(args, 'hu:l:vd:rcnm',
['help',
'user-agent=',
'logfile=',
'verbose',
'debug=',
'retrieval-all',
'cache-response',
'replace-nonascii',
'universal-activex'
])
except getopt.GetoptError, exp:
usage()
if not options and not args:
usage()
for option in options:
if option[0] == '-h' or option[0] == '--help':
usage()
        if option[0] == '-u' or option[0] == '--user-agent':
for ua in config.UserAgents:
if option[1] == str(ua[0]):
config.userAgent = str(ua[2])
config.appCodeName = str(ua[3])
config.appName = str(ua[4])
config.appVersion = str(ua[5])
config.browserTag = str(ua[6])
if option[0] == '-l' or option[0] == '--logfile':
config.logfilename = option[1]
if option[0] == '-v' or option[0] == '--verbose':
config.verboselevel = 1
if option[0] == '-d' or option[0] == '--debug':
config.verboselevel = int(option[1])
if option[0] == '-r' or option[0] == '--retrieval-all':
config.retrieval_all = True
if option[0] == '-c' or option[0] == '--cache-response':
config.cache_response = True
if option[0] == '-n' or option[0] == '--replace-nonascii':
config.replace_nonascii = True
if option[0] == '-m' or option[0] == '--universal-activex':
config.universal_activex = True
if config.verboselevel >= config.VERBOSE_DEBUG:
config.universal_activex = True
config.initial_URL = args[0]
check_logdirs()
from DOM.DOM import DOM
phoneycdom = DOM(config.initial_URL)
alerts = phoneycdom.analyze()
if alerts:
print "There is some Shellcode/Heapspray Alerts but it is not important for this program now."
# report(alerts)
else:
print "No Shellcode/Heapspray Alerts."
binaries_dir = os.listdir(BINARIES_DIR)
for file in binaries_dir:
filename = "%s/%s" % (BINARIES_DIR, file,)
newname = "%s/%s" % (MISC_DIR, file, )
if magic.file(filename) in DOWNLOADS_STR:
shutil.move(filename, newname)
| gpl-2.0 | 8,244,649,507,720,224,000 | 32.431373 | 129 | 0.516422 | false |
itucsdb1621/itucsdb1621 | tags.py | 1 | 2795 | import psycopg2
from flask import Flask
from flask import render_template, request
from flask import Blueprint, current_app,session,redirect, url_for
#declaring sub app with blueprint
tags_app = Blueprint('tags_app', __name__)
@tags_app.route('/add_tag/<photo_id>/', methods=["POST"])
def add_tag(photo_id):
# a post request would be more elegant
username = request.form["username"]
x = request.form["x"]
y = request.form["y"]
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select ID from users where username=%s",(username,))
conn.commit()
tagged_id = crs.fetchone()
if tagged_id == None:
return render_template("message.html",message="User not found")
        ## if null, show an error message
crs.execute("insert into tags (tagger_id,tagged_id,photo_id,time,x,y) values (%s,%s,%s,now(),%s,%s)",(session["user_id"],tagged_id,photo_id,x,y))
conn.commit()
return render_template('message.html',message="Successfully added tag")
@tags_app.route('/update_tag/<photo_id>/', methods=["POST"])
def update_tag(photo_id):
newUsername = request.form["username"]
x = request.form["x"]
y = request.form["y"]
tagged_id=request.form["_id"]
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select ID from users where username=%s",(newUsername,))
newId = crs.fetchone()
if newId == None:
return render_template("message.html",message="User not found")
print(tagged_id)
            ## if null, show an error message
crs.execute("update tags set tagged_id=%s,time=now(),x=%s,y=%s where tagger_id=%s and tagged_id=%s and photo_id=%s ",(newId[0],x,y,session["user_id"],tagged_id,photo_id))
conn.commit()
return render_template('message.html',message="Successfully updated tag")
@tags_app.route('/delete_tag/<photo_id>/', methods=["POST"])
def delete_tag(photo_id,):
tagged_id=request.form["_id"]
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
        ## if null, show an error message
print(tagged_id)
crs.execute("delete from tags where tagger_id=%s and tagged_id=%s and photo_id=%s ",(session["user_id"],tagged_id,photo_id))
conn.commit()
return render_template('message.html',message="Successfully deleted tag")
## no use
@tags_app.route('/retrieve_tags/<photo_id>/')
def retrieve_tags(photo_id):
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select * from tags where photo_id=%s ",(photo_id))
conn.commit()
return render_template('message.html',message="Successfully added tag")
| gpl-3.0 | -8,171,542,644,453,910,000 | 41.348485 | 179 | 0.647227 | false |
changhoonhahn/centralMS | centralms/tests/test_sfrs.py | 1 | 1027 | '''
Test functions for handling star formation rates
'''
import numpy as np
from scipy.integrate import odeint
import sfrs as SFR
def IntegrationTest():
    ''' Simple test of the integration
'''
logsfr = lambda mstar, t: np.log10(t**2)
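    # SFR(t) = t^2, so the analytic integral from 0 to t is t^3/3; the loop
    # below prints the integrated mass growth next to that analytic value for
    # a visual check.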
for tt in np.arange(1., 11., 1.):
M_int = SFR.integSFR(logsfr, np.array([0.]),
np.array([0.]), np.array([tt]),
mass_dict={'type': 'rk4', 'f_retain': 1e-9, 't_step': 0.01})
print np.log10(10**M_int[0] - 1.), np.log10(tt**3/3.)
return None
def Integration_ScipyComp():
    ''' Simple test of the integration
'''
dydt = lambda y, t: t
M_euler = SFR.ODE_Euler(dydt, np.array([0.]), np.array([0.,10.]), 0.001)
M_RK4 = SFR.ODE_RK4(dydt, np.array([0.]), np.array([0.,10.]), 0.1)
M_scipy = odeint(dydt, np.array([0.]), np.array([0.,10.]))
print M_euler
print M_RK4
print M_scipy
return None
if __name__=='__main__':
#IntegrationTest()
Integration_ScipyComp()
| mit | -3,111,330,352,351,419,400 | 19.54 | 76 | 0.555015 | false |
robosafe/mc-vs-bdi | models/pta_models/table/legible_full/extract.py | 1 | 8357 | #!/usr/bin/env python
# This script translates and separates the traces computed with UPPAAL model checking and the tracer tool (libutap). The traces are originally in a *.xtr file format. The specified automata transitions are separated from the global traces (the human, the setting of gaze, pressure and location), and transformed into a *.txt file with a list of high-level commands for the human machine in the simulator (sending signals, waiting for signals, setting parameters).
# Written by Dejanira Araiza-Illan, March 2015
# Modified for the table scenario, July 2016
import rospy
import re
import os
import sys
variables_keep = ['pressure','tlocation','gaze','bored','humanReady','leg']
def extract(nameFile):
# -------------- PARSING OF THE MODEL INTO USEFUL STRUCTURES
#automata = raw_input("Name of the automata with commas and no spaces (e.g. aut1,aut2,aut3):")
automata = 'human,g,p,l'
#automata = 'Human,Gaze,Pressure,Location'
automaton = re.split(",",automata)
type_of_trace=0
transitions=[]
states=[]
traces=[]
delays=[]
numberfile=re.split('\D+',nameFile)
# print numberfile
for i, line in enumerate(open(nameFile+ '.tr', 'r')):
for match in re.finditer("Initial state",line): #If from legible_traces.py
type_of_trace=1
for match in re.finditer("Trace",line): #Separate multiple traces
traces.append(i)
for match in re.finditer("Transitions:", line):
transitions.append(i)
for match in re.finditer("State:", line):
states.append(i)
for match in re.finditer("Delay:", line):
delays.append(i)
#Eliminate states and keep transitions
# print type_of_trace
# print traces
# print transitions
# print states
# print delays
if type_of_trace==1:
f=open('stimulus_'+numberfile[1]+'.txt', 'w')
trans_content=[]
for i in range(0,len(transitions)):
for j, line in enumerate(open(nameFile+ '.tr', 'r')):
if j>transitions[i] and j<(states[i]):
if line!='\n':
trans_content.append(line)
# print trans_content
#Eliminate unimportant transitions
important=[]
for i, line in enumerate(trans_content):
for j, aut in enumerate(automaton):
if aut != ',':
if re.match(aut+'.', line)!=None:
important.append(line)
# print important
#Check each transition and determine if human: sends signal, receives signal, sets variables
global variables_keep
for i,trans in enumerate(important):
var_split = re.split('; ',trans)
if var_split[1] != '0': #Signals
for match in re.finditer('!',var_split[1]):
signal = re.split('!',var_split[1])
for kk,variable in enumerate(variables_keep):
if re.search('\\b'+signal[0]+'\\b',variable):
#Write send signal
f.write('tell '+signal[0]+'\n')
for match in re.finditer('\?',var_split[1]):
#Write receive signal
for kk,variable in enumerate(variables_keep):
if re.search('\\b'+signal[0]+'\\b',variable):
signal = re.split('\?',var_split[1])
f.write('receivesignal\n')
if var_split[2] != '0}\n': #Variables
commas = re.split(',',var_split[2])
for j,part in enumerate(commas):
if commas!='':
new_string = corrected(part)
if new_string !='' and not re.search('bored',new_string):
f.write('set_param\t'+new_string+'\n')
elif re.search('bored',new_string):
f.write(new_string+'\n')
else:
#Eliminate extra "states:
for j,delay in enumerate(delays):
for i,state in enumerate(states):
if state>delay:
states.pop(i)
break
for j,tr in enumerate(traces):
for i,state in enumerate(states):
if state>tr:
states.pop(i)
break
# print states
#First traces
for tr in range (0,len(traces)-1):
f=open('stimulus_'+str(tr+1)+'_'+nameFile+'.txt', 'w')
trans_content=[]
for i in range(0,len(transitions)):
if transitions[i]>traces[tr] and transitions[i]<traces[tr+1]:
# print transitions[i]
for j, line in enumerate(open(nameFile+ '.tr', 'r')):
if j>=(transitions[i]) and j<(states[i]):
# print line
if line!='\n' and line!='Transitions:\n':
trans_content.append(line)
# print trans_content
#Eliminate unimportant transitions
important=[]
for i, line in enumerate(trans_content):
for j, aut in enumerate(automaton):
if aut != ',':
if re.match(' '+aut+'.', line)!=None:
important.append(line)
# print important
#Check each transition and determine if human: sends signal, receives signal, sets variables
for i,trans in enumerate(important):
var_split = re.split(', ',trans)
if var_split[1] != 'tau': #Signals
for match in re.finditer('!',var_split[1]):
signal = re.split('!',var_split[1])
f.write('tell '+signal[0]+'\n')#Write send signal
for match in re.finditer('\?',var_split[1]):
signal = re.split('\?',var_split[1])
f.write('receivesignal\n')#Write receive signal
if var_split[2] != '1 }\n': #Variables
for j in range(2,len(var_split)):
new_string = corrected2(var_split[j])
if new_string !='' and new_string != 'bored':
f.write('set_param\t'+new_string+'\n')
elif new_string == 'bored':
f.write(new_string+'\n')
#Last trace
f=open('stimulus_'+str(len(traces))+'_'+nameFile+'.txt', 'w')
trans_content=[]
for i in range(0,len(transitions)):
if transitions[i]>traces[len(traces)-1]:
for j, line in enumerate(open(nameFile+ '.tr', 'r')):
if j>=(transitions[i]) and j<(states[i]):
if line!='\n' and line!='Transitions:\n':
trans_content.append(line)
# print trans_content
important=[]
for i, line in enumerate(trans_content):
for j, aut in enumerate(automaton):
if aut != ',':
if re.match(' '+aut+'.', line)!=None:
important.append(line)
# print important
#Check each transition and determine if human: sends signal, receives signal, sets variables
for i,trans in enumerate(important):
var_split = re.split(', ',trans)
if var_split[1] != 'tau': #Signals
for match in re.finditer('!',var_split[1]):
signal = re.split('!',var_split[1])
f.write('tell '+signal[0]+'\n')#Write send signal
for match in re.finditer('\?',var_split[1]):
signal = re.split('\?',var_split[1])
f.write('receivesignal\n')#Write receive signal
if var_split[2] != '1 }\n': #Variables
for j in range(2,len(var_split)):
new_string = corrected2(var_split[j])
if new_string !='' and not re.search('bored',new_string):
f.write('set_param\t'+new_string+'\n')
elif re.search('bored',new_string):
f.write(new_string+'\n')
def corrected(expr):
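	# Turn one UPPAAL assignment such as "pressure:=1" into the "name=value"
	# command used in the stimulus files: only names in variables_keep are
	# kept, true/false map to 1/0, "tlocation" is renamed to "location", and
	# "bored:=true" becomes the bare "bored" command.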
expr_new=''
modif1 = re.split("\:=",expr)
#print modif1[0]
global variables_keep
for kk,variable in enumerate(variables_keep):
if re.search('\\b'+modif1[0]+'\\b',variable):
if re.search('bored',modif1[0]) and re.search('true',modif1[1]):
expr_new='bored'
elif re.search('bored',modif1[0]) and re.search('false',modif1[1]):
expr_new=''
elif re.search('tlocation',modif1[0]):
expr_new=expr_new+'location'+'='
if re.match('true',modif1[1]):
expr_new=expr_new+'1'
elif re.match('false',modif1[1]):
expr_new=expr_new+'0'
else:
expr_new=expr_new+modif1[0]+'='
if re.match(modif1[0],modif1[1]):
modif2 = re.split('\s*[+]|[-]\s*',modif1[1])
#print modif2
modif3 = re.split('}\n',modif2[1])
#print modif3
expr_new=expr_new+modif3[0]
elif re.match('rand_v',modif1[1]):
expr_new = ''
elif re.match('true',modif1[1]):
expr_new=expr_new+'1'
elif re.match('false',modif1[1]):
expr_new=expr_new+'0'
else:
modif4 = re.split('}\n',modif1[1])
expr_new=expr_new+modif4[0]
# print expr_new
return expr_new
def corrected2(expr):
expr_new=''
modif1 = re.split(" \:= ",expr)
expr_new=expr_new+modif1[0]+'='
if re.match(modif1[0],modif1[1]):
modif2 = re.split('\s*[+]|[-]\s*',modif1[1])
modif3 = re.split('\s*[}]\n',modif2[1])
expr_new=expr_new+modif3[0]
elif re.match('rand_v',modif1[1]):
expr_new = ''
else:
modif4 = re.split(' }\n',modif1[1])
expr_new=expr_new+modif4[0]
# print expr_new
return expr_new
if __name__ == "__main__":
if len(sys.argv) == 2: #arguments passed by command line: program, trace file
extract(sys.argv[1])
else:
print 'extract.py [trace file or .tr]'
sys.exit(1)
| gpl-3.0 | 51,145,047,482,889,110 | 34.261603 | 463 | 0.630848 | false |
akshayka/bft2f | bft2f.py | 1 | 3641 | #!/usr/bin/python
import sys
import os
from mininet.topo import Topo
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.net import Mininet
from mininet.log import lg, info
from mininet.util import dumpNodeConnections
from mininet.cli import CLI
from mininet.util import pmonitor
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import signal
NUMBER_NODES = 7
NUMBER_CLIENTS = 1
NUMBER_USERS = 1
RUN_DURATION = 35
popens = {}
LINK_BW=10
LINK_DELAY='10ms'
LINK_LOSS=10
ETC_HOSTS_FILE_NAME="bft2f_etc_hosts"
DEBUG_OUTPUT_FILE='bft2f.debug'
class BftTopo(Topo):
def __init__(self, n=2):
super(BftTopo, self).__init__()
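        # Star topology: all BFT nodes, clients, users and the app host hang
        # off a single switch with identical bandwidth/delay links.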
s0 = self.addSwitch('s0')
# create hosts
for i in xrange(0, NUMBER_NODES):
self.addLink(self.addHost('h%d' % (i)), s0, bw=LINK_BW, delay=LINK_DELAY)
for i in xrange(0, NUMBER_CLIENTS):
self.addLink(self.addHost('c%d' % (i)), s0, bw=LINK_BW, delay=LINK_DELAY)
for i in xrange(0, NUMBER_USERS):
self.addLink(self.addHost('u%d' % (i)), s0, bw=LINK_BW, delay=LINK_DELAY)
self.addLink(self.addHost('app'), s0, bw=LINK_BW, delay=LINK_DELAY)
return
def start_nodes(net, verbose):
for i in range(0, NUMBER_NODES):
h = net.getNodeByName('h%d'%(i))
h.cmd("route add -net default dev h%d-eth0" % (i))
if verbose:
cmd = 'python start_node.py --node_id=%d -v >>%s 2>&1' % (i, DEBUG_OUTPUT_FILE)
else:
cmd = 'python start_node.py --node_id=%d >>%s 2>&1' % (i, DEBUG_OUTPUT_FILE)
popens[h] = h.popen(cmd, shell=True, preexec_fn=os.setsid)
def start_client(net):
for i in xrange(0, NUMBER_CLIENTS):
client = net.getNodeByName('c%d' % (i))
client.cmd("route add -net default dev c%d-eth0" % (i))
popens[client] = client.popen('python start_client.py --client_id=%d' % (i),
shell=True, preexec_fn=os.setsid)
def start_user(net):
app = net.getNodeByName('app')
for i in xrange(0, NUMBER_USERS):
user = net.getNodeByName('u%d'%(i))
client = net.getNodeByName('c%d' % (i))
user.cmd("route add -net default dev u%d-eth0" % (i))
#popens[user] = client.popen('python start_user.py --user_id=%d --client_ip=%s --app_ip=%s >>%s 2>&1' % (i, client.IP(), app.IP(), DEBUG_OUTPUT_FILE), shell=True, preexec_fn=os.setsid)
def start_app(net):
app = net.getNodeByName('app')
app.cmd("route add -net default dev app-eth0")
popens[app] = app.popen('node haraka.js >>%s 2>&1' % (DEBUG_OUTPUT_FILE),
shell=True, preexec_fn=os.setsid, cwd='./Haraka')
def create_etc_hosts(net):
with open(ETC_HOSTS_FILE_NAME, "w+") as f:
for h in net.values():
f.write("%s\t%s\n" % (h.name, h.IP()))
def main():
parser = ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
topo = BftTopo()
net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
net.start()
create_etc_hosts(net)
# This dumps the topology and how nodes are interconnected through
# links.
dumpNodeConnections(net.hosts)
start_app(net)
start_nodes(net, args.verbose)
start_client(net)
#CLI(net)
sleep(5)
start_user(net)
CLI(net)
endTime = time() + RUN_DURATION
for p in popens.values():
os.killpg(p.pid, signal.SIGTERM)
net.stop()
if __name__ == '__main__':
main()
| gpl-2.0 | 563,290,887,564,789,900 | 32.1 | 193 | 0.61192 | false |
alexanderAustin/PythonGame | test.py | 1 | 2259 | # This was built from the tutorial https://www.raywenderlich.com/24252/beginning-game-programming-for-teens-with-python
import pygame, math, random
from pygame.locals import *
import pyganim
# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
pygame.display.set_caption('PyGame - Testing')
rootImg = "resources/images/basic_game/"
rootAud = "resources/audio/basic_game/"
player = pygame.image.load(rootImg + "dude.png")
grass = pygame.image.load(rootImg + "grass.png")
castle = pygame.image.load(rootImg + "castle.png").convert_alpha()
# cow = pygame.image.load("resources/images/animals/cow/cow_front.png") #subject to change
# Used https://github.com/asweigart/pyganim/tree/master/examples
# http://www.pygame.org/project-Pyganim+sprite+animation+module-2106-.html
# for the sprite sheets
cows = pyganim.getImagesFromSpriteSheet(
filename="resources/images/animals/cow/cow_front.png",
rows=4, cols=2,
scale=2)
cframes = list(zip(cows, [100] * len(cows)))
cowObj = pyganim.PygAnimation(cframes)
cowObj.play()
cowsr = pyganim.getImagesFromSpriteSheet(
filename="resources/images/animals/cow/cow_rear.png",
rows=3, cols=3,
scale=2)
crframes = list(zip(cowsr, [100] * len(cowsr)))
# crframes = crframes.pop()#remove blank frame
print crframes
cowrObj = pyganim.PygAnimation(crframes)
cowrObj.play()
# 4 - keep looping through
running = 1
while running:
# 5 - clear the screen before drawing it again
screen.fill(0)
# 6 - draw the screen elements
for x in range(width/grass.get_width()+1):
for y in range(height/grass.get_height()+1):
screen.blit(grass,(x*100,y*100))
cowObj.blit(screen, (200, 20))
cowrObj.blit(screen, (50, 200))
# screen.blit(castle, (100,100))
# 7 - update the screen
pygame.display.flip()
# 8 - loop through the events
for event in pygame.event.get():
# check if the event is the X button
if event.type==pygame.QUIT:
# if it is quit the game
pygame.quit()
exit(0)
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip() | apache-2.0 | 5,823,003,622,661,369,000 | 29.958904 | 119 | 0.678619 | false |
Caian/ostools | oslib/__init__.py | 1 | 1251 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2017 Caian Benedicto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .state import OSState
from .user import OSUser
from .find import findff, findfr, findrf, findrr, superfind
| mit | -590,005,395,829,594,500 | 45.333333 | 80 | 0.77458 | false |
michel-slm/0install | tests/testescaping.py | 1 | 2832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import basetest
from basetest import BaseTest
import sys, os, re
import unittest
sys.path.insert(0, '..')
from zeroinstall.injector import model
from zeroinstall.support import escaping
safe = re.compile('^[-.a-zA-Z0-9_]*$')
class TestEscaping(BaseTest):
def testEscape(self):
self.assertEqual("", model.escape(""))
self.assertEqual("hello", model.escape("hello"))
self.assertEqual("%20", model.escape(" "))
self.assertEqual("file%3a%2f%2ffoo%7ebar",
model.escape("file://foo~bar"))
self.assertEqual("file%3a%2f%2ffoo%25bar",
model.escape("file://foo%bar"))
self.assertEqual("file:##foo%7ebar",
model._pretty_escape("file://foo~bar"))
self.assertEqual("file:##foo%25bar",
model._pretty_escape("file://foo%bar"))
def testUnescape(self):
self.assertEqual("", model.unescape(""))
self.assertEqual("hello", model.unescape("hello"))
self.assertEqual(" ", model.unescape("%20"))
self.assertEqual("file://foo~bar",
model.unescape("file%3a%2f%2ffoo%7ebar"))
self.assertEqual("file://foo%bar",
model.unescape("file%3a%2f%2ffoo%25bar"))
self.assertEqual("file://foo",
model.unescape("file:##foo"))
self.assertEqual("file://foo~bar",
model.unescape("file:##foo%7ebar"))
self.assertEqual("file://foo%bar",
model.unescape("file:##foo%25bar"))
def testEscaping(self):
def check(str):
self.assertEqual(str, model.unescape(model.escape(str)))
self.assertEqual(str, model.unescape(model._pretty_escape(str)))
self.assertEqual(str,
escaping.ununderscore_escape(escaping.underscore_escape(str)))
check('')
check('http://example.com')
check('http://example%46com')
check('http:##example#com')
check('http://example.com/foo/bar.xml')
check('%20%21~&!"£ :@;,./{}$%^&()')
check('http://example.com/foo_bar-50%á.xml')
check('_one__two___three____four_____')
check('_1_and_2_')
def testUnderEscape(self):
for x in range(0, 128):
unescaped = chr(x)
escaped = escaping.underscore_escape(unescaped)
assert safe.match(escaped), escaped
self.assertEqual(unescaped, escaping.ununderscore_escape(escaped))
self.assertEqual("_2e_", escaping.underscore_escape("."))
self.assertEqual("_2e_.", escaping.underscore_escape(".."))
def testEscapeInterface(self):
self.assertEqual(["http", "example.com", "foo.xml"], model.escape_interface_uri("http://example.com/foo.xml"))
self.assertEqual(["http", "example.com", "foo__.bar.xml"], model.escape_interface_uri("http://example.com/foo/.bar.xml"))
self.assertEqual(["file", "root__foo.xml"], model.escape_interface_uri("/root/foo.xml"))
try:
model.escape_interface_uri("ftp://example.com/foo.xml")
assert 0
except AssertionError:
pass
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -8,598,118,403,903,593,000 | 31.906977 | 123 | 0.674205 | false |
imtapps/django-dynamic-validation | dynamic_validation/migrations/0002_auto__add_field_violation_silent.py | 1 | 3111 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Violation.silent'
db.add_column('dynamic_validation_violation', 'silent',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Violation.silent'
db.delete_column('dynamic_validation_violation', 'silent')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dynamic_rules.rule': {
'Meta': {'object_name': 'Rule'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'dynamic_fields': ('django_fields.fields.PickleField', [], {}),
'group_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'secondary_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'dynamic_validation.violation': {
'Meta': {'ordering': "('acceptable',)", 'unique_together': "(('trigger_model_id', 'trigger_content_type', 'rule', '_key'),)", 'object_name': 'Violation'},
'_key': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'acceptable': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dynamic_rules.Rule']"}),
'silent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'trigger_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'violations'", 'to': "orm['contenttypes.ContentType']"}),
'trigger_model_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'violated_fields': ('django_fields.fields.PickleField', [], {})
}
}
complete_apps = ['dynamic_validation'] | bsd-2-clause | 6,311,877,027,371,380,000 | 56.62963 | 166 | 0.569913 | false |
hodgestar/graas | graas/cli.py | 1 | 1213 | """ Command for launching GRaaS. """
import sys
import click
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from .api import GraasApi
from .devserver import GraasServerFactory
@click.command("graas")
@click.version_option()
@click.option(
'--host', '-h',
default='localhost',
help='Host to listen on')
@click.option(
'--web-port', '-p',
type=int, default=8080,
help='Port for web server to listen on')
@click.option(
'--device-port', '-d',
type=int, default=8081,
help='Port for device server to listen on')
@click.option(
'--log-file', '-l',
type=str, default=None,
help='File to log to')
def main(host, web_port, device_port, log_file):
""" Vumi Go Opt Out API. """
    if log_file is None:
        log_file = sys.stdout
    else:
        log_file = open(log_file, 'a')
log.startLogging(log_file)
site = Site(GraasApi().app.resource())
reactor.listenTCP(web_port, site, interface=host)
factory = GraasServerFactory()
reactor.listenTCP(device_port, factory, interface=host)
log.msg("Web API listening on %s:%s" % (host, web_port))
log.msg("Device server listening on %s:%s" % (host, device_port))
reactor.run()
| bsd-3-clause | 7,179,159,240,244,600,000 | 24.808511 | 69 | 0.658697 | false |
infobloxopen/infoblox-client | infoblox_client/object_manager.py | 1 | 21402 | # Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from oslo_log import log as logging
except ImportError: # pragma: no cover
import logging
from infoblox_client import exceptions as ib_ex
from infoblox_client import objects as obj
from infoblox_client import utils as ib_utils
LOG = logging.getLogger(__name__)
class InfobloxObjectManager(object):
def __init__(self, connector):
self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network)
"""
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.Dhcpoption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.Dhcpoption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.Dhcpoption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False)
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False)
def delete_ip_range(self, network_view, start_ip, end_ip):
range = obj.IPRange.search(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip)
if range:
range.delete()
def has_networks(self, network_view_name):
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
def network_exists(self, network_view, cidr):
"""Deprecated, use get_network() instead."""
LOG.warning(
"DEPRECATION WARNING! Using network_exists() is deprecated "
"and to be removed in next releases. "
"Use get_network() or objects.Network.search instead")
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
return network is not None
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
if ib_network.extattrs:
# Merge EA values as dicts
ea_dict = ib_network.extattrs.ea_dict
ea_dict.update(extattrs.ea_dict)
merged_ea = obj.EA(ea_dict)
ib_network.extattrs = merged_ea
else:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip, network_view=None):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
def find_hostname(self, dns_view, hostname, ip, network_view=None):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip,
network_view=network_view)
def find_host_records_by_mac(self, dns_view, mac, network_view=None):
host_records = []
host_records.extend(obj.HostRecord.search_all(
self.connector, view=dns_view, mac=mac, network_view=network_view))
# Unfortunately WAPI does not support search host records by DUID, so
# search host addresses by duid and then search hosts by name
ipv6_host_addresses = obj.IPv6HostAddress.search_all(
self.connector, duid=mac, network_view=network_view)
ipv6_hosts = []
for addr in ipv6_host_addresses:
hosts = obj.HostRecordV6.search_all(
self.connector, name=addr.host, view=dns_view,
network_view=network_view)
for host in hosts:
if host not in ipv6_hosts:
ipv6_hosts.append(host)
host_records.extend(ipv6_hosts)
return host_records
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp, use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp,
use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address, network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address,
network_view=network_view)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def get_fixed_addresses_by_mac(self, network_view, mac):
return obj.FixedAddress.search_all(
self.connector, network_view=network_view, mac=mac)
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
if not extattrs:
return
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.extattrs = extattrs
dns_zone.update()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
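    # Illustrative call (hypothetical values): for an IPv4 address with
    #   bind_list = ['record:a', 'record:ptr']
    # the code above creates/updates both an A record and a PTR record for
    # the given name.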
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
self.delete_objects_associated_with_a_record(a_record.name,
a_record.view,
unbind_list)
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
def get_member(self, member):
member.fetch()
return member
def get_all_ea_definitions(self):
return obj.EADefinition.search_all(self.connector)
def create_ea_definition(self, ea_def, reraise=False):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
LOG.error('Unable to create Extensible Attribute Definition '
'%s' % ea_def)
if reraise:
raise
def create_required_ea_definitions(self, required_ea_defs, reraise=False):
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = []
for req_def in required_ea_defs:
if not [ea_def for ea_def in existing_ea_defs
if ea_def.name == req_def['name']]:
missing_ea_defs.append(req_def)
created_ea_defs = []
for ea_def in missing_ea_defs:
if self.create_ea_definition(ea_def, reraise=reraise):
created_ea_defs.append(ea_def)
return created_ea_defs
def restart_all_services(self, member):
if not member._ref:
member.fetch(only_ref=True)
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def delete_objects_associated_with_a_record(self, name, view, delete_list):
"""Deletes records associated with record:a or record:aaaa."""
search_objects = {}
if 'record:cname' in delete_list:
search_objects['record:cname'] = 'canonical'
if 'record:txt' in delete_list:
search_objects['record:txt'] = 'name'
if not search_objects:
return
for obj_type, search_type in search_objects.items():
payload = {'view': view,
search_type: name}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
self.delete_object_by_ref(ib_obj['_ref'])
def delete_all_associated_objects(self, network_view, ip, delete_list):
LOG.warning(
"DEPRECATION WARNING! Using delete_all_associated_objects() "
"is deprecated and to be removed in next releases. "
"Use unbind_name_from_record_a() instead.")
def delete_object_by_ref(self, ref):
try:
self.connector.delete_object(ref)
except ib_ex.InfobloxCannotDeleteObject:
pass
| apache-2.0 | -3,936,074,170,078,028,300 | 43.40249 | 79 | 0.48944 | false |
plantigrade/geni-tools | src/gcf-gch.py | 1 | 6022 | #!/usr/bin/env python
#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
Framework to run a 'new' GENI Clearinghouse. See geni/gch.py for the
GENI Clearinghouse interface that this runs.
This serves an XMLRPC interface to the GENI Clearinghouse services,
which otherwise speak S/MIME.
Run with "-h" flag to see usage and command line options.
"""
# FIXME: Treat this only as example code. The CH APIs that this uses
# have likely changed since this was last used.
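# Example invocation (illustrative only; file names and paths are hypothetical):
#   python gcf-gch.py -k ch-key.pem -g ch-cert.pem -r trusted_roots/ \
#       -H 127.0.0.1 -p 8000 -c gcf_config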
import sys
# Check python version. Requires 2.6 or greater, but less than 3.
if sys.version_info < (2, 6):
raise Exception('Must use python 2.6 or greater.')
elif sys.version_info >= (3,):
raise Exception('Not python 3 ready')
import logging
import optparse
import os
from gcf import geni
from gcf.geni.gch import GENIClearinghouse
from gcf.geni.config import read_config
config = None
def getAbsPath(path):
"""Return None or a normalized absolute path version of the argument string.
Does not check that the path exists."""
if path is None:
return None
if path.strip() == "":
return None
path = os.path.normcase(os.path.expanduser(path))
if os.path.isabs(path):
return path
else:
return os.path.abspath(path)
class CommandHandler(object):
# TODO: Implement a register handler to register aggregate managers
# (persistently) so that a client could ask for the list of
# aggregate managers.
def runserver_handler(self, opts):
"""Run the clearinghouse server."""
ch = GENIClearinghouse()
# address is a tuple in python socket servers
addr = (opts.host, int(opts.port))
certfile = getAbsPath(opts.certfile)
keyfile = getAbsPath(opts.keyfile)
if not os.path.exists(certfile):
sys.exit("Clearinghouse certfile %s doesn't exist" % certfile)
if not os.path.getsize(certfile) > 0:
sys.exit("Clearinghouse certfile %s is empty" % certfile)
if not os.path.exists(keyfile):
sys.exit("Clearinghouse keyfile %s doesn't exist" % keyfile)
if not os.path.getsize(keyfile) > 0:
sys.exit("Clearinghouse keyfile %s is empty" % keyfile)
# rootcafile is turned into a concatenated file for Python SSL use inside ch.py
ch.runserver(addr,
keyfile,
certfile,
getAbsPath(opts.rootcadir),
config)
def parse_args(argv):
parser = optparse.OptionParser()
parser.add_option("-k", "--keyfile",
help="CH key file name", metavar="FILE")
parser.add_option("-g", "--certfile",
help="CH certificate file name (PEM format)", metavar="FILE")
parser.add_option("-c", "--configfile", default="gcf_config", help="config file path", metavar="FILE")
# Note: A CH that only wants to talk to its own users doesn't need
# this argument. It works if it just trusts its own cert.
# Supplying this arg allows users of other frameworks to create slices on this CH.
parser.add_option("-r", "--rootcadir",
help="Root certificate directory name (files in PEM format)", metavar="FILE")
# Could try to determine the real IP Address instead of the loopback
# using socket.gethostbyname(socket.gethostname())
parser.add_option("-H", "--host",
help="server ip", metavar="HOST")
parser.add_option("-p", "--port", type=int,
help="server port", metavar="PORT")
parser.add_option("--debug", action="store_true", default=False,
help="enable debugging output")
return parser.parse_args()
def main(argv=None):
if argv is None:
argv = sys.argv
opts, args = parse_args(argv)
level = logging.INFO
if opts.debug:
level = logging.DEBUG
logging.basicConfig(level=level)
if not args:
args = ('runserver',)
handler = '_'.join((args[0], 'handler'))
# Read in config file options, command line gets priority
global config
optspath = None
if not opts.configfile is None:
optspath = os.path.expanduser(opts.configfile)
config = read_config(optspath)
for (key,val) in config['geni clearinghouse'].items():
if hasattr(opts,key) and getattr(opts,key) is None:
setattr(opts,key,val)
if not hasattr(opts,key):
setattr(opts,key,val)
if getattr(opts,'rootcadir') is None:
setattr(opts,'rootcadir',config['global']['rootcadir'])
config['debug'] = opts.debug
ch = CommandHandler()
if hasattr(ch, handler):
return getattr(ch, handler)(opts)
else:
print >> sys.stderr, 'Unknown command ', args[0]
if __name__ == "__main__":
sys.exit(main())
| mit | -3,464,075,406,930,950,000 | 36.874214 | 106 | 0.639489 | false |
IvIePhisto/ECoXiPy | ecoxipy/pyxom/_document.py | 1 | 17879 | # -*- coding: utf-8 -*-
import collections
from xml.sax.xmlreader import AttributesImpl
from ecoxipy import _python2, _unicode
from ecoxipy import _helpers
from ._common import XMLNode, ContainerNode, _string_repr
from ._content_nodes import Text
from .indexing import (IndexDescriptor, ElementByUniqueAttributeValueIndexer,
ElementsByNameIndexer, NamespaceIndexer)
class DocumentType(object):
'''\
    Represents the document type declaration of a :class:`Document`. It should
    not be instantiated on its own.
:param name: The document element name.
:type name: Unicode string
:param publicid: The document type public ID or :const:`None`.
:type publicid: Unicode string
:param systemid: The document type system ID or :const:`None`.
:type systemid: Unicode string
:param check_well_formedness: If :const:`True` the document element name
will be checked to be a valid XML name.
:type check_well_formedness: :func:`bool`
'''
__slots__ = {'_name', '_publicid', '_systemid', '_check_well_formedness'}
def __init__(self, name, publicid, systemid, check_well_formedness):
if check_well_formedness:
if name is not None:
_helpers.enforce_valid_xml_name(name)
if publicid is not None:
_helpers.enforce_valid_doctype_publicid(publicid)
if systemid is not None:
_helpers.enforce_valid_doctype_systemid(systemid)
self._name = name
self._publicid = publicid
self._systemid = systemid
self._check_well_formedness = check_well_formedness
@property
def name(self):
'''\
The document element name or :const:`None`. On setting if the value
is :const:`None`, :attr:`publicid` and :attr:`systemid` are also set
to :const:`None`. Otherwise the value is converted to an Unicode
string; a :class:`ecoxipy.XMLWellFormednessException` is thrown if it
is not a valid XML name and ``check_well_formedness`` is
:const:`True`.
'''
return self._name
@name.setter
def name(self, name):
if name is None:
self._publicid = None
self._systemid = None
else:
name = _unicode(name)
if self._check_well_formedness:
_helpers.enforce_valid_xml_name(name)
self._name = name
@property
def publicid(self):
'''\
The document type public ID or :const:`None`. On setting if the value
is not :const:`None` it is converted to a Unicode string; a
:class:`ecoxipy.XMLWellFormednessException` is thrown if it is not a
valid doctype public ID and ``check_well_formedness`` is
:const:`True`.
'''
return self._publicid
@publicid.setter
def publicid(self, publicid):
if publicid is not None:
publicid = _unicode(publicid)
if self._check_well_formedness:
_helpers.enforce_valid_doctype_publicid(publicid)
self._publicid = publicid
@property
def systemid(self):
'''\
The document type system ID or :const:`None`. On setting if the value
is not :const:`None` it is converted to a Unicode string; a
:class:`ecoxipy.XMLWellFormednessException` is thrown if it is not a
valid doctype system ID and ``check_well_formedness`` is
:const:`True`.
'''
return self._systemid
@systemid.setter
def systemid(self, systemid):
if systemid is not None:
systemid = _unicode(systemid)
if self._check_well_formedness:
_helpers.enforce_valid_doctype_systemid(systemid)
self._systemid = systemid
def __repr__(self):
return 'ecoxipy.pyxom.DocumentType({}, {}, {})'.format(
_string_repr(self._name),
_string_repr(self._publicid),
_string_repr(self._systemid),
)
def __eq__(self, other):
return (isinstance(other, DocumentType)
and self._name == other._name
and self._publicid == other._publicid
and self._systemid == other._systemid
)
def __ne__(self, other):
return (not(isinstance(other, DocumentType))
or self._name != other._name
or self._publicid != other._publicid
or self._systemid != other._systemid
)
@staticmethod
def _parse_values(name, publicid, systemid):
if name is None:
publicid = None
systemid = None
else:
name = _unicode(name)
if publicid is not None:
publicid = _unicode(publicid)
if systemid is not None:
systemid = _unicode(systemid)
return name, publicid, systemid
@staticmethod
def _create(name, publicid, systemid, check_well_formedness):
name, publicid, systemid = DocumentType._parse_values(
name, publicid, systemid)
return DocumentType(name, publicid, systemid, check_well_formedness)
class Document(ContainerNode):
'''\
A :class:`ContainerNode` representing a XML document.
:param doctype_name: The document type root element name or :const:`None`
        if the document should not have a document type declaration.
:type doctype_name: Unicode string
:param doctype_publicid: The public ID of the document type declaration
or :const:`None`.
:type doctype_publicid: Unicode string
:param doctype_systemid: The system ID of the document type declaration
or :const:`None`.
:type doctype_systemid: Unicode string
:param children: The document root :class:`XMLNode` instances.
:param encoding: The encoding of the document. If it is :const:`None`
`UTF-8` is used.
:type encoding: Unicode string
:param omit_xml_declaration: If :const:`True` the XML declaration is
omitted.
:type omit_xml_declaration: :func:`bool`
:param check_well_formedness: If :const:`True` the document element name
will be checked to be a valid XML name.
:type check_well_formedness: :func:`bool`
:raises ecoxipy.XMLWellFormednessException: If ``check_well_formedness``
is :const:`True` and ``doctype_name`` is not a valid XML name,
``doctype_publicid`` is not a valid public ID or ``doctype_systemid``
is not a valid system ID.
'''
__slots__ = {'_doctype', '_omit_xml_declaration', '_encoding'}
def __init__(self, doctype_name, doctype_publicid, doctype_systemid,
children, omit_xml_declaration, encoding,
check_well_formedness=False):
ContainerNode.__init__(self, children)
self._doctype = DocumentType(doctype_name, doctype_publicid,
doctype_systemid, check_well_formedness)
self._omit_xml_declaration = omit_xml_declaration
if encoding is None:
encoding = u'UTF-8'
self._encoding = encoding
@staticmethod
def create(*children, **kargs):
'''\
Creates a document and converts parameters to appropriate types.
:param children: The document root nodes. All items that are not
:class:`XMLNode` instances create :class:`Text` nodes after they
have been converted to Unicode strings.
:param kargs: The same parameters as the constructor has (except
``children``) are recognized. The items ``doctype_name``,
``doctype_publicid``, ``doctype_systemid``, and ``encoding`` are
converted to Unicode strings if they are not :const:`None`.
``omit_xml_declaration`` is converted to boolean.
:returns: The created document.
:rtype: :class:`Document`
:raises ecoxipy.XMLWellFormednessException: If ``doctype_name`` is not
a valid XML name, ``doctype_publicid`` is not a valid public ID or
``doctype_systemid`` is not a valid system ID.
'''
doctype_name = kargs.get('doctype_name', None)
doctype_publicid = kargs.get('doctype_publicid', None)
doctype_systemid = kargs.get('doctype_systemid', None)
doctype_name, doctype_publicid, doctype_systemid = DocumentType._parse_values(
doctype_name, doctype_publicid, doctype_systemid)
omit_xml_declaration = kargs.get('omit_xml_declaration', None)
omit_xml_declaration = bool(omit_xml_declaration)
encoding = kargs.get('encoding', None)
if encoding is not None:
encoding = _unicode(encoding)
return Document(doctype_name, doctype_publicid, doctype_systemid,
[
child if isinstance(child, XMLNode) else Text.create(child)
for child in children
], omit_xml_declaration, encoding, True)
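    # Illustrative usage (a minimal sketch, values are hypothetical):
    #   doc = Document.create(u'Hello, world!', doctype_name='html',
    #                         omit_xml_declaration=True)
    # The non-XMLNode argument becomes a Text child of the created document.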
@property
def doctype(self):
'''\
The :class:`DocumentType` instance of the document.
On setting one of the following occurs:
1. If the value is :const:`None`, the document type's attributes are
set to :const:`None`.
2. If the value is a byte or Unicode string, the document type
document element name is set to this value (a byte string will be
converted to Unicode). The document type public and system IDs
will be set to :const:`None`.
3. If the value is a mapping, the items identified by the strings
``'name'``, ``'publicid'`` or ``'systemid'`` define the respective
attributes of the document type, the others are assumed to be
:const:`None`.
4. If the value is a sequence, the item at position zero defines the
document type document element name, the item at position one
defines the public ID and the item at position two defines the
system ID. If the sequence is shorter than three, non-available
items are assumed to be :const:`None`.
The document type values are converted to appropriate values and their
validity is checked if ``check_well_formedness`` is :const:`True`.
Example:
>>> doc = Document.create()
>>> doc.doctype
ecoxipy.pyxom.DocumentType(None, None, None)
>>> doc.doctype = {'name': 'test', 'systemid': 'foo bar'}
>>> doc.doctype
ecoxipy.pyxom.DocumentType('test', None, 'foo bar')
>>> doc.doctype = ('html', 'foo bar')
>>> doc.doctype
ecoxipy.pyxom.DocumentType('html', 'foo bar', None)
>>> doc.doctype = 'foo'
>>> doc.doctype
ecoxipy.pyxom.DocumentType('foo', None, None)
>>> doc.doctype = None
>>> doc.doctype
ecoxipy.pyxom.DocumentType(None, None, None)
'''
return self._doctype
@doctype.setter
def doctype(self, value):
if value is None:
name = None
publicid = None
systemid = None
else:
if value.__class__ is bytes:
value = _unicode(value)
try:
name = value.get('name', None)
publicid = value.get('publicid', None)
systemid = value.get('systemid', None)
except AttributeError:
if value.__class__ is _unicode:
name = value
publicid = None
systemid = None
else:
if len(value) > 2:
systemid = value[2]
else:
systemid = None
if len(value) > 1:
publicid = value[1]
else:
publicid = None
if len(value) > 0:
name = value[0]
else:
name = None
name, publicid, systemid = DocumentType._parse_values(
name, publicid, systemid)
self._doctype.name = name
self._doctype.publicid = publicid
self._doctype.systemid = systemid
@property
def omit_xml_declaration(self):
'''\
If :const:`True` the XML declaration is omitted.
'''
return self._omit_xml_declaration
@omit_xml_declaration.setter
def omit_xml_declaration(self, value):
self._omit_xml_declaration = bool(value)
@property
def encoding(self):
'''\
The encoding of the document. On setting if the value is
:const:`None` it is set to ``UTF-8``, otherwise it is converted to an
Unicode string.
'''
return self._encoding
@encoding.setter
def encoding(self, value):
if value is None:
value = u'UTF-8'
else:
value = _unicode(value)
self._encoding = value
def __bytes__(self):
'''\
Creates a byte string containing the XML representation of the
node with the encoding :meth:`encoding`.
'''
return self.create_str(encoding=self._encoding)
if _python2:
__str__ = __bytes__
del __bytes__
def __hash__(self):
return object.__hash__(self)
@_helpers.inherit_docstring(ContainerNode)
def create_sax_events(self, content_handler=None, out=None,
out_encoding='UTF-8', indent_incr=None):
return XMLNode.create_sax_events(self, content_handler, out,
self._encoding, indent_incr)
def _create_str(self, out):
return out.document(self._doctype.name, self._doctype.publicid,
self._doctype.systemid, self._children_strings(out),
self._omit_xml_declaration, self._encoding)
def _create_sax_events(self, content_handler, indent):
content_handler.startDocument()
try:
notationDecl = content_handler.notationDecl
except AttributeError:
pass
else:
notationDecl(self._doctype.name, self._doctype.publicid,
self._doctype.systemid)
for child in self:
child._create_sax_events(content_handler, indent)
content_handler.endDocument()
def __repr__(self):
return 'ecoxipy.pyxom.Document[{}, {}, {}]'.format(
repr(self._doctype),
repr(self._omit_xml_declaration),
_string_repr(self._encoding))
def __eq__(self, other):
if not(isinstance(other, Document)
and self._doctype == other._doctype
and self._omit_xml_declaration == other._omit_xml_declaration
and self._encoding == other._encoding
and len(self) == len(other)):
return False
for i in range(len(self)):
if self[i] != other[i]:
return False
return True
def __ne__(self, other):
if (not(isinstance(other, Document))
or self._doctype != other._doctype
or self._omit_xml_declaration != other._omit_xml_declaration
or self._encoding != other._encoding
or len(self) != len(other)):
return True
for i in range(len(self)):
if self[i] != other[i]:
return True
return False
@_helpers.inherit_docstring(ContainerNode)
def duplicate(self):
return Document(self._doctype.name, self._doctype.publicid,
self._doctype.systemid,
[child.duplicate() for child in self],
self._omit_xml_declaration, self._encoding)
element_by_id = IndexDescriptor(ElementByUniqueAttributeValueIndexer())
'''\
A :class:`ecoxipy.pyxom.indexing.IndexDescriptor` instance using a
:class:`ecoxipy.pyxom.indexing.ElementByUniqueAttributeValueIndexer`
for indexing.
Use it like a mapping to retrieve the element having an attribute ``id``
with the value being equal to the requested key, possibly throwing a
:class:`KeyError` if such an element does not exist.
    **Important:** If the document's children are relevantly modified (i.e. an
``id`` attribute was created, modified or deleted), :meth:`delete_indexes`
should be called or this attribute should be deleted on the instance,
which deletes the index.
'''
elements_by_name = IndexDescriptor(ElementsByNameIndexer())
'''\
A :class:`ecoxipy.pyxom.indexing.IndexDescriptor` instance using a
:class:`ecoxipy.pyxom.indexing.ElementsByNameIndexer` for indexing.
Use it like a mapping to retrieve an iterator over elements having a name
equal to the requested key, possibly throwing a :class:`KeyError` if such
an element does not exist.
    **Important:** If the document's children are relevantly modified (i.e. new
elements were added or deleted, elements' names were modified),
:meth:`delete_indexes` should be called or this attribute should be
deleted on the instance, which deletes the index.
'''
nodes_by_namespace = IndexDescriptor(NamespaceIndexer())
'''\
A :class:`ecoxipy.pyxom.indexing.IndexDescriptor` instance using a
:class:`ecoxipy.pyxom.indexing.NamespaceIndexer` for indexing.
    **Important:** If the document's children are relevantly modified (i.e. new
elements/attributes were added or deleted, elements'/attributes' names
were modified), :meth:`delete_indexes` should be called or this attribute
should be deleted on the instance, which deletes the index.
'''
def delete_indexes(self):
'''\
        A shortcut to delete the indexes of :attr:`element_by_id`,
        :attr:`elements_by_name` and :attr:`nodes_by_namespace`.
'''
del self.element_by_id
del self.elements_by_name
del self.nodes_by_namespace
del (IndexDescriptor, ElementByUniqueAttributeValueIndexer,
ElementsByNameIndexer, NamespaceIndexer) | mit | -2,128,559,108,914,202,400 | 37.534483 | 86 | 0.606578 | false |
status-im/status-react | test/appium/tests/conftest.py | 1 | 13196 | import requests
import pytest
import re
from _pytest.runner import runtestprotocol
from http.client import RemoteDisconnected
from support.device_stats_db import DeviceStatsDB
from support.test_rerun import should_rerun_test
from tests import test_suite_data, appium_container
from datetime import datetime
from os import environ
from io import BytesIO
from sauceclient import SauceClient, SauceException
from support.api.network_api import NetworkApi
from support.github_report import GithubHtmlReport
from support.testrail_report import TestrailReport
from tests.users import transaction_senders
import tests
sauce_username = environ.get('SAUCE_USERNAME')
sauce_access_key = environ.get('SAUCE_ACCESS_KEY')
github_token = environ.get('GIT_HUB_TOKEN')
sauce = SauceClient(sauce_username, sauce_access_key)
github_report = GithubHtmlReport()
testrail_report = TestrailReport()
def pytest_addoption(parser):
parser.addoption("--build",
action="store",
default=datetime.now().strftime('%Y-%m-%d-%H-%M'),
help="Specify build name")
parser.addoption('--apk',
action='store',
default=None,
help='Url or local path to apk')
parser.addoption('--env',
action='store',
default='sauce',
help='Specify environment: local/sauce/api')
parser.addoption('--platform_version',
action='store',
default='8.0',
help='Android device platform version')
parser.addoption('--log_steps',
action='store',
default=False,
help='Display each test step in terminal as plain text: True/False')
parser.addoption('--pr_number',
action='store',
default=None,
help='Pull Request number')
parser.addoption('--testrail_report',
action='store',
default=False,
help='boolean; For creating testrail report per run')
parser.addoption('--network',
action='store',
default='ropsten',
help='string; ropsten or rinkeby')
parser.addoption('--rerun_count',
action='store',
default=0,
help='How many times tests should be re-run if failed')
parser.addoption("--run_testrail_ids",
action="store",
metavar="NAME",
default=None,
help="only run tests matching the environment NAME.")
parser.addoption("--apk_upgrade",
action="store",
metavar="NAME",
default=None,
help='Url or local path to apk for upgrade')
# chat bot
parser.addoption('--messages_number',
action='store',
default=20,
help='Messages number')
parser.addoption('--public_keys',
action='store',
default='',
help='List of public keys for one-to-one chats')
parser.addoption('--running_time',
action='store',
default=600,
help='Running time in seconds')
parser.addoption('--chat_name',
action='store',
default='test_chat',
help='Public chat name')
parser.addoption('--device_number',
action='store',
default=2,
help='Public chat name')
# running tests using appium docker instance
parser.addoption('--docker',
action='store',
default=False,
help='Are you using the appium docker container to run the tests?')
parser.addoption('--docker_shared_volume',
action='store',
default=None,
help='Path to a directory with .apk that will be shared with docker instance. Test reports will be also saved there')
parser.addoption('--device_ip',
action='store',
default=None,
help='Android device IP address used for battery tests')
parser.addoption('--bugreport',
action='store',
default=False,
help='Should generate bugreport for each test?')
parser.addoption('--stats_db_host',
action='store',
default=None,
help='Host address for device stats database')
parser.addoption('--stats_db_port',
action='store',
default=8086,
help='Port for device stats db')
parser.addoption('--stats_db_username',
action='store',
default=None,
help='Username for device stats db')
parser.addoption('--stats_db_password',
action='store',
default=None,
help='Password for device stats db')
parser.addoption('--stats_db_database',
action='store',
default='example9',
help='Database name for device stats db')
def is_master(config):
return not hasattr(config, 'workerinput')
def is_uploaded():
stored_files = sauce.storage.get_stored_files()
for i in range(len(stored_files['files'])):
if stored_files['files'][i]['name'] == test_suite_data.apk_name:
return True
def pytest_configure(config):
tests.pytest_config_global = vars(config.option)
config.addinivalue_line("markers", "testrail_id(name): empty")
if config.getoption('log_steps'):
import logging
logging.basicConfig(level=logging.INFO)
if config.getoption('env') != 'api':
        test_suite_data.apk_name = [i for i in config.getoption('apk').split('/')
                                    if '.apk' in i][0]
if is_master(config):
pr_number = config.getoption('pr_number')
if config.getoption('testrail_report'):
if pr_number:
run_number = len(testrail_report.get_runs(pr_number)) + 1
run_name = 'PR-%s run #%s' % (pr_number, run_number)
else:
run_name = test_suite_data.apk_name
testrail_report.add_run(run_name)
if pr_number:
from github import Github
repo = Github(github_token).get_user('status-im').get_repo('status-react')
pull = repo.get_pull(int(pr_number))
pull.get_commits()[0].create_status(state='pending', context='Mobile e2e tests',
description='e2e tests are running')
if config.getoption('env') == 'sauce':
if not is_uploaded():
if 'http' in config.getoption('apk'):
response = requests.get(config.getoption('apk'), stream=True)
response.raise_for_status()
file = BytesIO(response.content)
del response
requests.post('http://saucelabs.com/rest/v1/storage/'
+ sauce_username + '/' + test_suite_data.apk_name + '?overwrite=true',
auth=(sauce_username, sauce_access_key),
data=file,
headers={'Content-Type': 'application/octet-stream'})
else:
sauce.storage.upload_file(config.getoption('apk'))
def pytest_unconfigure(config):
if is_master(config):
if config.getoption('testrail_report'):
testrail_report.add_results()
if config.getoption('pr_number'):
from github import Github
repo = Github(github_token).get_user('status-im').get_repo('status-react')
pull = repo.get_pull(int(config.getoption('pr_number')))
comment = pull.create_issue_comment(github_report.build_html_report(testrail_report.run_id))
if not testrail_report.is_run_successful():
pull.get_commits()[0].create_status(state='failure', context='Mobile e2e tests',
description='Failure - e2e tests are failed',
target_url=comment.html_url)
else:
pull.get_commits()[0].create_status(state='success', context='Mobile e2e tests',
description='Success - e2e tests are passed',
target_url=comment.html_url)
def should_save_device_stats(config):
db_args = [config.getoption(option) for option in
('stats_db_host', 'stats_db_port', 'stats_db_username', 'stats_db_password', 'stats_db_database')]
return all(db_args)
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if report.when == 'call':
is_sauce_env = item.config.getoption('env') == 'sauce'
current_test = test_suite_data.current_test
if report.failed:
error = report.longreprtext
exception = re.findall('E.*Message:|E.*Error:|E.*Failed:', error)
if exception:
error = error.replace(re.findall('E.*Message:|E.*Error:|E.*Failed:', report.longreprtext)[0], '')
current_test.testruns[-1].error = error
if is_sauce_env:
update_sauce_jobs(current_test.name, current_test.testruns[-1].jobs, report.passed)
if item.config.getoption('docker'):
device_stats = appium_container.get_device_stats()
if item.config.getoption('bugreport'):
appium_container.generate_bugreport(item.name)
build_name = item.config.getoption('apk')
# Find type of tests that are run on the device
if 'battery_consumption' in item.keywords._markers:
test_group = 'battery_consumption'
else:
test_group = None
if should_save_device_stats(item.config):
device_stats_db = DeviceStatsDB(
item.config.getoption('stats_db_host'),
item.config.getoption('stats_db_port'),
item.config.getoption('stats_db_username'),
item.config.getoption('stats_db_password'),
item.config.getoption('stats_db_database'),
)
device_stats_db.save_stats(build_name, item.name, test_group, not report.failed, device_stats)
def update_sauce_jobs(test_name, job_ids, passed):
for job_id in job_ids.keys():
try:
sauce.jobs.update_job(job_id, name=test_name, passed=passed)
except (RemoteDisconnected, SauceException):
pass
def get_testrail_case_id(item):
testrail_id = item.get_closest_marker('testrail_id')
if testrail_id:
return testrail_id.args[0]
def pytest_runtest_setup(item):
try:
testrail_id = [mark.args[0] for mark in item.iter_markers(name='testrail_id')][0]
    except IndexError:
        testrail_id = None
run_testrail_ids = item.config.getoption("run_testrail_ids")
if run_testrail_ids:
if str(testrail_id) not in list(run_testrail_ids.split(",")):
pytest.skip("test requires testrail case id %s" % testrail_id)
test_suite_data.set_current_test(item.name, testrail_case_id=get_testrail_case_id(item))
test_suite_data.current_test.create_new_testrun()
def pytest_runtest_protocol(item, nextitem):
rerun_count = int(item.config.getoption('rerun_count'))
for i in range(rerun_count):
reports = runtestprotocol(item, nextitem=nextitem)
for report in reports:
if report.failed and should_rerun_test(report.longreprtext):
break # rerun
else:
return True # no need to rerun
@pytest.fixture(scope="session", autouse=False)
def faucet_for_senders():
network_api = NetworkApi()
for user in transaction_senders.values():
network_api.faucet(address=user['address'])
@pytest.fixture
def messages_number(request):
return int(request.config.getoption('messages_number'))
@pytest.fixture
def message_wait_time(request):
return int(request.config.getoption('message_wait_time'))
@pytest.fixture
def participants_number(request):
return int(request.config.getoption('participants_number'))
@pytest.fixture
def chat_name(request):
return request.config.getoption('chat_name')
@pytest.fixture
def user_public_key(request):
return request.config.getoption('user_public_key')
| mpl-2.0 | 520,488,454,995,145,340 | 40.2375 | 138 | 0.554032 | false |
Yangqing/caffe2 | caffe2/python/layers/fc.py | 1 | 3042 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package fc
# Module caffe2.python.layers.fc
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FC(SamplingTrainableMixin, ModelLayer):
def __init__(self, model, input_record, output_dims, weight_init=None,
bias_init=None, weight_optim=None, bias_optim=None, name='fc',
weight_reg=None, bias_reg=None, **kwargs):
super(FC, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type {}".format(input_record))
assert len(input_record.field_types()[0].shape) > 0, (
"FC expects limited dimensions of the input tensor")
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FC expects input dimensions > 0, got {}".format(input_dims))
scale = math.sqrt(1.0 / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.w = self.create_param(param_name='w',
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg)
self.b = self.create_param(param_name='b',
shape=[output_dims, ],
initializer=bias_init,
optimizer=bias_optim,
regularizer=bias_reg)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
self.get_next_blob_reference('output')
)
def _add_ops(self, net, params):
net.FC(self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), **self.kwargs)
@property
def param_blobs(self):
return [self.w, self.b]
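    # Illustrative usage in a layer-model context (a sketch; the input record
    # name is an assumption, not part of this module):
    #   fc = FC(model, model.input_feature_schema.dense, output_dims=64)
    #   # fc.output_schema is then a schema.Scalar of shape (64,)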
| apache-2.0 | -1,604,963,854,280,311,300 | 40.108108 | 80 | 0.589415 | false |
janpipek/boadata | boadata/gui/qt/views/histogram_view.py | 1 | 1199 | from .view import View
from ..backends.matplotlib import MatplotlibBackend
from boadata import unwrap
import seaborn as sns
# @View.register_view
class HistogramView(View):
    @classmethod
    def accepts(cls, data_object):
return True
def create_widget(self, parent=None, xcol=None, bins=50, **kwargs):
if xcol is not None:
try:
data = self.data_object.evaluate(xcol)
except:
data = self.data_object[xcol]
else:
data = self.data_object
xcol = "x"
data = unwrap(data.dropna().convert("numpy_array"))
widget, fig = MatplotlibBackend.create_figure_widget(parent=parent)
fig.add_subplot(111)
ax = fig.get_axes()
extra_args = {}
if not kwargs.get("hist"):
extra_args["kde_kws"] = {"shade": True}
sns.distplot(data, hist=kwargs.get("hist", False), kde=kwargs.get("kde", False),
bins=bins, rug=kwargs.get("rug", False), ax=ax[0], **extra_args)
xlabel = kwargs.get("xlabel", xcol)
ax[0].set_xlabel(xlabel)
if "title" in kwargs:
ax[0].set_title(kwargs["title"])
return widget | mit | -1,467,702,614,424,234,500 | 30.578947 | 88 | 0.57548 | false |
mycointest/owncoin | share/seeds/generate-seeds.py | 1 | 4297 | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
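# Example invocation (illustrative; paths are hypothetical):
#   python generate-seeds.py ../seeds > ../../src/chainparamsseeds.h
# where the given directory contains nodes_main.txt and nodes_test.txt in the
# formats described above.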
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the owncoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9887)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19887)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit | 6,910,685,491,095,861,000 | 30.82963 | 98 | 0.575983 | false |
fhqgfss/MoHa | moha/posthf/ci/configuration.py | 1 | 1441 | import numpy as np
import copy
import itertools
from moha.system.basis import SlaterDeterminant,NElectronBasisSet
class Configuration(object):
"""
"""
def __init__(self):
"""
"""
pass
@classmethod
def truncated(cls,hfwavefunction,excitation_level):
"""
        excitation_level : list of excitation orders to include,
                           e.g. CISD -> [1, 2], CID -> [2]
"""
Nelec = hfwavefunction.occ['alpha'] + hfwavefunction.occ['beta']
Dim = hfwavefunction.dim*2
reference = copy.deepcopy(hfwavefunction.configuration)
basis_set = NElectronBasisSet(1,[reference])
#basis_set = NElectronBasisSet()
for el in excitation_level:
for o_list in itertools.combinations(range(Nelec),el):
for u_list in itertools.combinations(range(Nelec,Dim),el):
reference = copy.deepcopy(hfwavefunction.configuration)
for o in o_list:
if o%2==0:
reference['alpha'][o//2] -= 1
elif o%2==1:
reference['beta'][o//2] -= 1
for u in u_list:
if u%2==0:
reference['alpha'][u//2] += 1
elif u%2==1:
reference['beta'][u//2] += 1
basis_set.add(reference)
return basis_set
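    # Illustrative usage (the Hartree-Fock wavefunction object is assumed to
    # come from a preceding SCF calculation):
    #   cisd_basis = Configuration.truncated(hfwavefunction, [1, 2])  # CISD
    #   cid_basis = Configuration.truncated(hfwavefunction, [2])      # CID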
| mit | -5,836,256,464,177,644,000 | 32.511628 | 75 | 0.485774 | false |
jcfr/girder | tests/test_plugins/test_plugin/server.py | 1 | 1795 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
from girder.utility.server import staticFile
class CustomAppRoot(object):
"""
    Test replacement for the webroot endpoint; it simply serves a plain
    string instead of the main index HTML file.
"""
exposed = True
def GET(self):
return "hello world"
class Other(Resource):
def __init__(self):
self.resourceName = 'other'
self.route('GET', (), self.getResource)
@access.public
def getResource(self, params):
return ['custom REST route']
getResource.description = Description('Get something.')
def load(info):
info['serverRoot'], info['serverRoot'].girder = (
CustomAppRoot(), info['serverRoot'])
info['serverRoot'].api = info['serverRoot'].girder.api
del info['serverRoot'].girder.api
info['apiRoot'].other = Other()
path = os.path.join(globals()['PLUGIN_ROOT_DIR'], 'static.txt')
info['serverRoot'].static_route = staticFile(path)
| apache-2.0 | 4,997,767,409,729,860,000 | 30.491228 | 79 | 0.630641 | false |
mkobos/tree_crawler | concurrent_tree_crawler/tree_accessor.py | 1 | 3966 | import logging
import threading
from concurrent_tree_crawler.abstract_tree_accessor import \
AbstractTreeAccessor, NodeAction
from concurrent_tree_crawler.abstract_node import NodeState
class TreeAccessor(AbstractTreeAccessor):
"""
An interface for the tree made of L{AbstractNode}s.
	Access to state-changing methods is protected by synchronization
	primitives: locks and condition variables.
"""
def __init__(self, sentinel):
"""
@param sentinel: a technical node which will be made parent of the
root node.
@type sentinel: L{AbstractNode}
"""
self.__sentinel = sentinel
"""
The sentinel is a purely technical object. It shouldn't be
analyzed by the navigator. It is here just to make sure that the
root of the tree has a parent. This is because it is required by our
algorithm that all of the nodes in the tree have a parent.
"""
self.__root = None
"""The main business-level element of the tree"""
## The one and only child of the sentinel is the root node
if self.__sentinel.has_child("root"):
self.__root = self.__sentinel.get_child("root")
else:
self.__root = self.__sentinel.add_child("root", NodeState.OPEN)
def get_sentinel(self):
return self.__sentinel
def get_root(self):
return self.__root
def update_and_get_child(self, node, possible_children_names):
while True:
node.get_children_cond().acquire()
try:
child = node.update_and_get_child(possible_children_names)
if child is None: ## No accessible children are available
return None
state = child.get_state()
if state == NodeState.OPEN:
child.set_state(NodeState.PROCESSING)
return (child, NodeAction.TO_PROCESS)
elif state == NodeState.VISITED:
return (child, NodeAction.TO_VISIT)
elif state == NodeState.PROCESSING:
self.__log("Starting to wait on \"{}\" node children".\
format(node.get_name()))
node.get_children_cond().wait()
self.__log("Done waiting on \"{}\" node children".format(
node.get_name()))
else:
assert False, "Unknown node state: {}".format(state)
finally:
node.get_children_cond().release()
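	# A sketch of how a crawler thread may drive this method (the navigator
	# object and its methods are hypothetical):
	#   result = tree.update_and_get_child(node, navigator.get_link_names(node))
	#   if result is not None:
	#       child, action = result
	#       if action == NodeAction.TO_PROCESS:
	#           tree.set_node_type(child, is_leaf=navigator.is_leaf(child))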
def set_node_type(self, node, is_leaf):
assert node != self.__sentinel, "Processing sentinel is not allowed"
parent = node.get_parent()
parent.get_children_cond().acquire()
try:
if is_leaf:
node.set_state(NodeState.CLOSED)
self.__internal_update_node_state(parent)
else:
node.set_state(NodeState.VISITED)
finally:
parent.get_children_cond().notify_all()
parent.get_children_cond().release()
def set_error(self, node):
self.__set_node_state_and_update(node, NodeState.ERROR)
def __set_node_state_and_update(self, node, new_state):
assert node != self.__sentinel, "Changing sentinel state is not allowed"
parent = node.get_parent()
parent.get_children_cond().acquire()
try:
node.set_state(new_state)
self.__internal_update_node_state(parent)
finally:
parent.get_children_cond().notify_all()
parent.get_children_cond().release()
def __internal_update_node_state(self, node):
"""@param node: L{AbstractNode}"""
if node == self.__sentinel:
## The state of the sentinel is undefined and not used
## in the program, it should not be changed
return
new_state = None
if node.all_children_are_in_one_of_states({NodeState.CLOSED}):
new_state = NodeState.CLOSED
elif node.all_children_are_in_one_of_states(
{NodeState.ERROR, NodeState.CLOSED}):
new_state = NodeState.ERROR
## Node state does not have to be changed
if new_state is None:
return
parent = node.get_parent()
parent.get_children_cond().acquire()
try:
node.set_state(new_state)
self.__internal_update_node_state(parent)
finally:
parent.get_children_cond().notify_all()
parent.get_children_cond().release()
def __log(self, message):
"""
@type message: string
"""
logging.debug("thread=\"{}\", {}".format(
threading.current_thread().name, message)) | mit | -5,910,058,468,039,156,000 | 30.736 | 76 | 0.688603 | false |
danakj/chromium | services/shell/public/tools/manifest/manifest_collator.py | 1 | 2806 | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Service Manifests """
import argparse
import json
import os
import shutil
import sys
import urlparse
eater_relative = '../../../../../../tools/json_comment_eater'
eater_relative = os.path.join(os.path.abspath(__file__), eater_relative)
sys.path.insert(0, os.path.normpath(eater_relative))
try:
import json_comment_eater
finally:
sys.path.pop(0)
def ParseJSONFile(filename):
with open(filename) as json_file:
try:
return json.loads(json_comment_eater.Nom(json_file.read()))
except ValueError:
print "%s is not a valid JSON document" % filename
return None
def MergeDicts(left, right):
for k, v in right.iteritems():
if k not in left:
left[k] = v
else:
if isinstance(v, dict):
assert isinstance(left[k], dict)
MergeDicts(left[k], v)
elif isinstance(v, list):
assert isinstance(left[k], list)
left[k].extend(v)
else:
raise "Refusing to merge conflicting non-collection values."
return left
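# Illustrative behavior of MergeDicts (hypothetical inputs):
#   MergeDicts({'capabilities': {'a': [1]}},
#              {'capabilities': {'a': [2], 'b': {}}})
#   -> {'capabilities': {'a': [1, 2], 'b': {}}}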
def MergeBaseManifest(parent, base):
MergeDicts(parent["capabilities"], base["capabilities"])
if "services" in base:
if "services" not in parent:
parent["services"] = []
parent["services"].extend(base["services"])
if "process-group" in base:
parent["process-group"] = base["process-group"]
def main():
parser = argparse.ArgumentParser(
description="Collate Service Manifests.")
parser.add_argument("--parent")
parser.add_argument("--output")
parser.add_argument("--name")
parser.add_argument("--base-manifest", default=None)
args, children = parser.parse_known_args()
parent = ParseJSONFile(args.parent)
if parent == None:
return 1
if args.base_manifest:
base = ParseJSONFile(args.base_manifest)
if base == None:
return 1
MergeBaseManifest(parent, base)
service_path = parent['name'].split(':')[1]
if service_path.startswith('//'):
raise ValueError("Service name path component '%s' must not start " \
"with //" % service_path)
if args.name != service_path:
raise ValueError("Service name '%s' specified in build file does not " \
"match name '%s' specified in manifest." %
(args.name, service_path))
services = []
for child in children:
service = ParseJSONFile(child)
if service == None:
return 1
services.append(service)
if len(services) > 0:
parent['services'] = services
with open(args.output, 'w') as output_file:
json.dump(parent, output_file)
return 0
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | 4,538,789,842,080,426,000 | 25.980769 | 76 | 0.650036 | false |
skevy/django | django/db/models/fields/related.py | 1 | 54908 | from django.conf import settings
from django.db import connection, router, transaction
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_unicode
from django.utils.translation import (ugettext_lazy as _, string_concat,
ungettext, ugettext)
from django.utils.functional import curry
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
except AttributeError:
# If it doesn't have a split it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
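# An illustrative cross-app lazy relation that gets resolved through
# add_lazy_relation (the models are hypothetical, not part of this module):
#   class Book(models.Model):
#       author = models.ForeignKey("someapp.Author")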
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
def contribute_to_class(self, cls, name):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower(),
}
other = self.rel.to
if isinstance(other, basestring) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.related)
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return self._pk_trace(value, 'get_prep_lookup', lookup_type)
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_prep_lookup', lookup_type) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return [self._pk_trace(value, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)]
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)
for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def _pk_trace(self, value, prep_func, lookup_type, **kwargs):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v = value
# In the case of an FK to 'self', this check allows to_field to be used
# for both forwards and reverse lookups across the FK. (For normal FKs,
# it's only relevant for forward lookups).
if isinstance(v, self.rel.to):
field_name = getattr(self.rel, "field_name", None)
else:
field_name = None
try:
while True:
if field_name is None:
field_name = v._meta.pk.name
v = getattr(v, field_name)
field_name = None
except AttributeError:
pass
except exceptions.ObjectDoesNotExist:
v = None
field = self
while field.rel:
if hasattr(field.rel, 'field_name'):
field = field.rel.to._meta.get_field(field.rel.field_name)
else:
field = field.rel.to._meta.pk
if lookup_type in ('range', 'in'):
v = [v]
v = getattr(field, prep_func)(lookup_type, v, **kwargs)
if isinstance(v, list):
v = v[0]
return v
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or self.opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
db = router.db_for_read(self.related.model, instance=instance)
rel_obj = self.related.model._base_manager.using(db).get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name)
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# Set the value of the related field to the value of the related object's related field
setattr(value, self.related.field.attname, getattr(instance, self.related.field.rel.get_related_field().attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
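# Illustrative usage (not part of the original source), assuming the
# Place/Restaurant example mentioned in the comments above:
#
#   place.restaurant          # hits the database once, then uses the cache
#   place.restaurant = rest   # also seeds the caches on both instances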
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
return self
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
# If the related manager indicates that it should be used for
# related fields, respect that.
rel_mgr = self.field.rel.to._default_manager
db = router.db_for_read(self.field.rel.to, instance=instance)
if getattr(rel_mgr, 'use_for_related_fields', False):
rel_obj = rel_mgr.using(db).get(**params)
else:
rel_obj = QuerySet(self.field.rel.to).using(db).get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
            raise AttributeError("%s must be accessed via instance" % self.field.name)
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.field.get_cache_name(), None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related:
cache_name = self.field.related.get_cache_name()
try:
delattr(related, cache_name)
except AttributeError:
pass
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.create_manager(instance,
self.related.model._default_manager.__class__)
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
def delete_manager(self, instance):
"""
Returns a queryset based on the related model's base manager (rather
than the default manager, as returned by __get__). Used by
Model.delete().
"""
return self.create_manager(instance,
self.related.model._base_manager.__class__)
def create_manager(self, instance, superclass):
"""
Creates the managers used by other methods (__get__() and delete()).
"""
rel_field = self.related.field
rel_model = self.related.model
class RelatedManager(superclass):
def get_query_set(self):
db = self._db or router.db_for_read(rel_model, instance=instance)
return superclass.get_query_set(self).using(db).filter(**(self.core_filters))
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, rel_field.name, instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs):
val = getattr(instance, rel_field.rel.get_related_field().attname)
for obj in objs:
# Is obj actually part of this descriptor set?
if getattr(obj, rel_field.attname) == val:
setattr(obj, rel_field.name, None)
obj.save()
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, instance))
remove.alters_data = True
def clear(self):
for obj in self.all():
setattr(obj, rel_field.name, None)
obj.save()
clear.alters_data = True
manager = RelatedManager()
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
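# Illustrative usage (not part of the original source), assuming the
# Poll/Choice example mentioned in the comments above:
#
#   poll.choice_set.all()                 # filtered on the FK column
#   poll.choice_set.create(choice='x')    # saved with choice.poll = poll
#   poll.choice_set.clear()               # only available if the FK is nullable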
def create_many_related_manager(superclass, rel=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
through = rel.through
class ManyRelatedManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_field_name=None, target_field_name=None,
reverse=False):
super(ManyRelatedManager, self).__init__()
self.core_filters = core_filters
self.model = model
self.symmetrical = symmetrical
self.instance = instance
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.through = through
self._pk_val = self.instance.pk
self.reverse = reverse
if self._pk_val is None:
raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
def get_query_set(self):
db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
return superclass.get_query_set(self).using(db)._next_is_sticky().filter(**(self.core_filters))
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if rel.through._meta.auto_created:
def add(self, *objs):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
if self.symmetrical:
self._remove_items(self.target_field_name, self.source_field_name, *objs)
remove.alters_data = True
def clear(self):
self._clear_items(self.source_field_name)
# If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
if self.symmetrical:
self._clear_items(self.target_field_name)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not rel.through._meta.auto_created:
opts = through._meta
raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = \
super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# join_table: name of the m2m link table
# source_field_name: the PK fieldname in join_table for the source object
# target_field_name: the PK fieldname in join_table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db))
new_ids.add(obj.pk)
elif isinstance(obj, Model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
vals = vals.filter(**{
source_field_name: self._pk_val,
'%s__in' % target_field_name: new_ids,
})
new_ids = new_ids - set(vals)
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
for obj_id in new_ids:
self.through._default_manager.using(db).create(**{
'%s_id' % source_field_name: self._pk_val,
'%s_id' % target_field_name: obj_id,
})
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to remove
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
old_ids.add(obj.pk)
else:
old_ids.add(obj)
# Work out what DB we're operating on
db = router.db_for_write(self.through, instance=self.instance)
# Send a signal to the other end if need be.
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are deleting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
# Remove the specified objects from the join table
self.through._default_manager.using(db).filter(**{
source_field_name: self._pk_val,
'%s__in' % target_field_name: old_ids
}).delete()
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are deleting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
def _clear_items(self, source_field_name):
db = router.db_for_write(self.through, instance=self.instance)
# source_col_name: the PK colname in join_table for the source object
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are clearing the
# duplicate data rows for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
self.through._default_manager.using(db).filter(**{
source_field_name: self._pk_val
}).delete()
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are clearing the
# duplicate data rows for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model = self.related.model
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.related.field.rel)
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
def _through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
through = property(_through)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related
# model's default manager.
        rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.field.rel)
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
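# Illustrative usage (not part of the original source), assuming the
# Article/Publication example mentioned in the comments above:
#
#   article.publications.add(p1, p2)   # only for auto-created through models
#   article.publications = [p1, p2]    # clear() followed by add(*value)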
class ManyToOneRel(object):
def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None):
super(OneToOneRel, self).__init__(to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete
)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None):
self.to = to
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
        Returns the field in the 'to' object to which this relationship is tied
(this is always the primary key on the target model). Provided for
symmetry with ManyToOneRel.
"""
return self.to._meta.pk
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Model %(model)s with pk %(pk)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
kwargs['rel'] = rel_class(to, to_field,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
Field.__init__(self, **kwargs)
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.rel.limit_choices_to)
if not qs.exists():
raise exceptions.ValidationError(self.error_messages['invalid'] % {
'model': self.rel.to._meta.verbose_name, 'pk': value})
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.rel.get_related_field().attname)
return field_default
def get_db_prep_save(self, value, connection):
if value == '' or value == None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value,
connection=connection)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_unicode(choice_list[1][0])
return Field.value_to_string(self, obj)
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "o2m")
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# don't get a related descriptor.
if not self.rel.is_hidden():
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# If the database needs similar types for key fields however, the only
# thing we can do is making AutoField an IntegerField.
rel_field = self.rel.get_related_field()
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
class OneToOneField(ForeignKey):
"""
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(),
SingleRelatedObjectDescriptor(related))
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
if isinstance(field.rel.to, basestring) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, basestring):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.object_name.lower()
to = to.lower()
meta = type('Meta', (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
})
# Construct and return the new class.
return type(name, (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(klass, related_name='%s+' % name),
to: models.ForeignKey(to_model, related_name='%s+' % name)
})
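# Rough sketch (not part of the original source) of the class this factory
# builds for a hypothetical Article.publications M2M; names and db_table are
# assumptions following the naming logic above:
#
#   class Article_publications(models.Model):
#       article = models.ForeignKey(Article, related_name='Article_publications+')
#       publication = models.ForeignKey(Publication, related_name='Article_publications+')
#       class Meta:
#           db_table = 'app_article_publications'
#           unique_together = ('article', 'publication')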
class ManyToManyField(RelatedField, Field):
description = _("Many-to-many relationship")
def __init__(self, to, **kwargs):
try:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to==RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None))
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
Field.__init__(self, **kwargs)
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return util.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
for f in self.rel.through._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
for f in self.rel.through._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
if related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
else:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_unicode(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
# 2) The class owning the m2m field is abstract.
if not self.rel.through and not cls._meta.abstract:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, basestring):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "m2m")
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# don't get a related descriptor.
if not self.rel.is_hidden():
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
| bsd-3-clause | -3,232,830,973,483,323,000 | 45.889838 | 222 | 0.602153 | false |
noironetworks/heat | heat/engine/resources/openstack/designate/zone.py | 1 | 5889 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class DesignateZone(resource.Resource):
"""Heat Template Resource for Designate Zone.
    Designate provides DNS-as-a-Service for OpenStack. A zone, as part of a
    domain, is a realm with an identification string that is unique in DNS.
"""
support_status = support.SupportStatus(
version='8.0.0')
PROPERTIES = (
NAME, TTL, DESCRIPTION, EMAIL, TYPE, MASTERS
) = (
'name', 'ttl', 'description', 'email', 'type', 'masters'
)
ATTRIBUTES = (
SERIAL,
) = (
'serial',
)
TYPES = (
PRIMARY, SECONDARY
) = (
'PRIMARY', 'SECONDARY'
)
properties_schema = {
# Based on RFC 1035, length of name is set to max of 255
NAME: properties.Schema(
properties.Schema.STRING,
_('DNS Name for the zone.'),
required=True,
constraints=[constraints.Length(max=255)]
),
# Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
TTL: properties.Schema(
properties.Schema.INTEGER,
_('Time To Live (Seconds) for the zone.'),
update_allowed=True,
constraints=[constraints.Range(min=1,
max=2147483647)]
),
# designate mandates to the max length of 160 for description
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of zone.'),
update_allowed=True,
constraints=[constraints.Length(max=160)]
),
EMAIL: properties.Schema(
properties.Schema.STRING,
_('E-mail for the zone. Used in SOA records for the zone. '
'It is required for PRIMARY Type, otherwise ignored.'),
update_allowed=True,
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of zone. PRIMARY is controlled by Designate, SECONDARY '
'zones are slaved from another DNS Server.'),
default=PRIMARY,
constraints=[constraints.AllowedValues(
allowed=TYPES)]
),
MASTERS: properties.Schema(
properties.Schema.LIST,
_('The servers to slave from to get DNS information and is '
'mandatory for zone type SECONDARY, otherwise ignored.'),
update_allowed=True
)
}
attributes_schema = {
SERIAL: attributes.Schema(
_("DNS zone serial number."),
type=attributes.Schema.STRING
),
}
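    # Illustrative HOT snippet (not part of the original source) showing how this
    # resource might be declared; the property values below are made up:
    #
    #   resources:
    #     example_zone:
    #       type: OS::Designate::Zone
    #       properties:
    #         name: example.org.
    #         email: admin@example.org
    #         ttl: 3600
    #         type: PRIMARY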
default_client_name = 'designate'
entity = 'zones'
def client(self):
return super(DesignateZone,
self).client(version=self.client_plugin().V2)
def validate(self):
super(DesignateZone, self).validate()
def raise_invalid_exception(zone_type, prp):
if self.properties.get(self.TYPE) == zone_type:
if not self.properties.get(prp):
msg = _('Property %(prp)s is required for zone type '
'%(zone_type)s') % {
"prp": prp,
"zone_type": zone_type
}
raise exception.StackValidationFailed(message=msg)
raise_invalid_exception(self.PRIMARY, self.EMAIL)
raise_invalid_exception(self.SECONDARY, self.MASTERS)
def handle_create(self):
args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
args['type_'] = args.pop(self.TYPE)
zone = self.client().zones.create(**args)
self.resource_id_set(zone['id'])
def _check_status_complete(self):
zone = self.client().zones.get(self.resource_id)
if zone['status'] == 'ERROR':
raise exception.ResourceInError(
resource_status=zone['status'],
status_reason=_('Error in zone'))
return zone['status'] != 'PENDING'
def check_create_complete(self, handler_data=None):
return self._check_status_complete()
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
args = dict()
for prp in (self.EMAIL, self.TTL, self.DESCRIPTION, self.MASTERS):
if prop_diff.get(prp):
args[prp] = prop_diff.get(prp)
if len(args.keys()) > 0:
self.client().zones.update(self.resource_id, args)
def check_update_complete(self, handler_data=None):
return self._check_status_complete()
def _resolve_attribute(self, name):
if self.resource_id is None:
return
if name == self.SERIAL:
zone = self.client().zones.get(self.resource_id)
return zone[name]
def check_delete_complete(self, handler_data=None):
if handler_data:
with self.client_plugin().ignore_not_found:
return self._check_status_complete()
return True
def resource_mapping():
return {
'OS::Designate::Zone': DesignateZone
}
| apache-2.0 | 857,568,286,610,432,500 | 31.716667 | 78 | 0.587366 | false |
selboo/starl-mangle | webvirtmgr/webvirtmgr/server.py | 1 | 30677 | # Utility functions used for guest installation
#
import libvirt
from libvirt import VIR_DOMAIN_XML_SECURE
from network.IPy import IP
import re
import time
import libxml2
from datetime import datetime
import string
def get_xml_path(xml, path=None, func=None):
"""
Return the content from the passed xml xpath, or return the result
of a passed function (receives xpathContext as its only arg)
"""
doc = None
ctx = None
result = None
try:
doc = libxml2.parseDoc(xml)
ctx = doc.xpathNewContext()
if path:
ret = ctx.xpathEval(path)
if ret is not None:
if type(ret) == list:
if len(ret) >= 1:
result = ret[0].content
else:
result = ret
elif func:
result = func(ctx)
else:
raise ValueError("'path' or 'func' is required.")
finally:
if doc:
doc.freeDoc()
if ctx:
ctx.xpathFreeContext()
return result
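# Illustrative usage (not part of the original source): pull a single value out
# of a libvirt XML description, or run a custom xpath callback.
#
#   pool_type = get_xml_path(stg.XMLDesc(0), "/pool/@type")
#   uuid = get_xml_path(dom.XMLDesc(0),
#                       func=lambda ctx: ctx.xpathEval("/domain/uuid")[0].content)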
def network_size(net, dhcp=None):
"""
    Return the gateway, netmask and DHCP pool for the given network.
"""
mask = IP(net).strNetmask()
addr = IP(net)
if addr[0].strNormal()[-1] == '0':
gateway = addr[1].strNormal()
dhcp_pool = [addr[2].strNormal(), addr[addr.len() - 2].strNormal()]
else:
gateway = addr[0].strNormal()
dhcp_pool = [addr[1].strNormal(), addr[addr.len() - 2].strNormal()]
if dhcp:
return gateway, mask, dhcp_pool
else:
return gateway, mask, None
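# Illustrative example (not part of the original source) of what network_size
# returns for a /24 with a DHCP pool requested:
#
#   network_size('192.168.1.0/24', dhcp=True)
#   # -> ('192.168.1.1', '255.255.255.0', ['192.168.1.2', '192.168.1.254'])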
class ConnServer(object):
def __init__(self, host):
"""
        Open a libvirt connection to the given host.
"""
self.login = host.login
self.host = host.hostname
self.passwd = host.password
self.type = host.type
self.port = host.port
if self.type == 'tcp':
def creds(credentials, user_data):
for credential in credentials:
if credential[0] == libvirt.VIR_CRED_AUTHNAME:
credential[4] = self.login
if len(credential[4]) == 0:
credential[4] = credential[3]
elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
credential[4] = self.passwd
else:
return -1
return 0
flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
auth = [flags, creds, None]
uri = 'qemu+tcp://%s/system' % self.host
self.conn = libvirt.openAuth(uri, auth, 0)
if self.type == 'ssh':
uri = 'qemu+ssh://%s@%s:%s/system' % (self.login, self.host, self.port)
self.conn = libvirt.open(uri)
def lookupVM(self, vname):
"""
Return VM object.
"""
try:
dom = self.conn.lookupByName(vname)
except:
dom = None
return dom
def storagePool(self, storage):
"""
Return storage object.
"""
try:
stg = self.conn.storagePoolLookupByName(storage)
except:
stg = None
return stg
def networkPool(self, network):
"""
Return network object.
"""
try:
net = self.conn.networkLookupByName(network)
except:
net = None
return net
def storageVol(self, volume, storage):
"""
Return volume object.
"""
stg = self.storagePool(storage)
stg_type = get_xml_path(stg.XMLDesc(0), "/pool/@type")
if stg_type == 'dir':
volume += '.img'
stg_volume = stg.storageVolLookupByName(volume)
return stg_volume
def storageVolPath(self, volume):
"""
Return volume object by path.
"""
stg_volume = self.conn.storageVolLookupByPath(volume)
return stg_volume
def hard_accel_node(self):
"""
Check hardware acceleration.
"""
xml = self.conn.getCapabilities()
kvm = re.search('kvm', xml)
if kvm:
return True
else:
return False
def add_vm(self, name, ram, cpu, host_model, images, nets, virtio, storages, passwd=None):
"""
        Create a VM.
"""
ram = int(ram) * 1024
iskvm = re.search('kvm', self.conn.getCapabilities())
if iskvm:
dom_type = 'kvm'
else:
dom_type = 'qemu'
machine = get_xml_path(self.conn.getCapabilities(), "/capabilities/guest/arch/machine/@canonical")
if not machine:
machine = 'pc-1.0'
if re.findall('/usr/libexec/qemu-kvm', self.conn.getCapabilities()):
emulator = '/usr/libexec/qemu-kvm'
elif re.findall('/usr/bin/kvm', self.conn.getCapabilities()):
emulator = '/usr/bin/kvm'
elif re.findall('/usr/bin/qemu-kvm', self.conn.getCapabilities()):
emulator = '/usr/bin/qemu-kvm'
else:
emulator = '/usr/bin/qemu-system-x86_64'
disks = []
for image in images:
img = self.storageVolPath(image)
image_type = self.get_vol_image_type(storages, img.name())
disks.append({'image': image, 'type': image_type})
xml = """<domain type='%s'>
<name>%s</name>
<description>None</description>
<memory unit='KiB'>%s</memory>
<vcpu>%s</vcpu>""" % (dom_type, name, ram, cpu)
if host_model:
xml += """<cpu mode='host-model'/>"""
xml += """<os>
<type arch='x86_64' machine='%s'>hvm</type>
<boot dev='hd'/>
<boot dev='cdrom'/>
<bootmenu enable='yes'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>%s</emulator>""" % (machine, emulator)
disk_letters = list(string.lowercase)
for disk in disks:
xml += """<disk type='file' device='disk'>
<driver name='qemu' type='%s'/>
<source file='%s'/>""" % (disk['type'], disk['image'])
if virtio:
xml += """<target dev='vd%s' bus='virtio'/>""" % (disk_letters.pop(0),)
else:
xml += """<target dev='hd%s' bus='ide'/>""" % (disk_letters.pop(0),)
xml += """</disk>"""
xml += """<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file=''/>
<target dev='sda' bus='ide'/>
<readonly/>
</disk>"""
for net in nets.split(','):
xml += """
<interface type='network'>
<source network='%s'/>""" % net
if virtio:
xml += """<model type='virtio'/>"""
xml += """
</interface>"""
xml += """
<input type='tablet' bus='usb'/>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0' passwd='%s'>
<listen type='address' address='0.0.0.0'/>
</graphics>
<memballoon model='virtio'/>
</devices>
</domain>""" % (passwd)
self.conn.defineXML(xml)
dom = self.lookupVM(name)
dom.setAutostart(1)
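    # Illustrative call (not part of the original source); the host, image path,
    # network and storage names below are made up. RAM is given in MiB.
    #
    #   conn = ConnServer(host)
    #   conn.add_vm('vm1', ram=2048, cpu=2, host_model=True,
    #               images=['/var/lib/libvirt/images/vm1.img'],
    #               nets='default', virtio=True,
    #               storages=['default'], passwd='secret')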
def get_vol_image_type(self, storages, vol):
for storage in storages:
stg = self.storagePool(storage)
if stg.info()[0] != 0:
stg.refresh(0)
for img in stg.listVolumes():
if img == vol:
vol = stg.storageVolLookupByName(img)
xml = vol.XMLDesc(0)
image_type = get_xml_path(xml, "/volume/target/format/@type")
return image_type
def vds_get_node(self):
"""
        Get all VMs on the host server.
"""
vname = {}
for vm_id in self.conn.listDomainsID():
vm_id = int(vm_id)
dom = self.conn.lookupByID(vm_id)
vname[dom.name()] = dom.info()[0]
for name in self.conn.listDefinedDomains():
dom = self.lookupVM(name)
vname[dom.name()] = dom.info()[0]
return vname
def networks_get_node(self):
"""
        Return the host server's virtual networks.
"""
virtnet = {}
for network in self.conn.listNetworks():
net = self.conn.networkLookupByName(network)
status = net.isActive()
virtnet[network] = status
for network in self.conn.listDefinedNetworks():
net = self.networkPool(network)
status = net.isActive()
virtnet[network] = status
return virtnet
def storages_get_node(self):
"""
        Return the host server's storage pools.
"""
storages = {}
for storage in self.conn.listStoragePools():
stg = self.conn.storagePoolLookupByName(storage)
status = stg.isActive()
storages[storage] = status
for storage in self.conn.listDefinedStoragePools():
stg = self.storagePool(storage)
status = stg.isActive()
storages[storage] = status
return storages
def node_get_info(self):
"""
        Return host server information: hostname, CPU, memory, ...
"""
info = []
info.append(self.conn.getHostname())
info.append(self.conn.getInfo()[0])
info.append(self.conn.getInfo()[2])
try:
info.append(get_xml_path(self.conn.getSysinfo(0),
"/sysinfo/processor/entry[6]"))
except:
info.append('Unknown')
info.append(self.conn.getURI())
info.append(self.conn.getLibVersion())
return info
def memory_get_usage(self):
"""
        Return memory usage on the node.
"""
allmem = self.conn.getInfo()[1] * 1048576
get_freemem = self.conn.getMemoryStats(-1, 0)
if type(get_freemem) == dict:
freemem = (get_freemem.values()[0] + \
get_freemem.values()[2] + \
get_freemem.values()[3]) * 1024
percent = (freemem * 100) / allmem
percent = 100 - percent
memusage = (allmem - freemem)
else:
memusage = None
percent = None
return allmem, memusage, percent
def cpu_get_usage(self):
"""
        Return CPU usage on the node.
"""
prev_idle = 0
prev_total = 0
cpu = self.conn.getCPUStats(-1, 0)
if type(cpu) == dict:
for num in range(2):
idle = self.conn.getCPUStats(-1, 0).values()[1]
total = sum(self.conn.getCPUStats(-1, 0).values())
diff_idle = idle - prev_idle
diff_total = total - prev_total
diff_usage = (1000 * (diff_total - diff_idle) / diff_total + 5) / 10
prev_total = total
prev_idle = idle
if num == 0:
time.sleep(1)
else:
if diff_usage < 0:
diff_usage = 0
else:
diff_usage = None
return diff_usage
def new_volume(self, storage, name, size, format='qcow2'):
"""
        Add a new volume to the storage pool.
"""
stg = self.storagePool(storage)
size = int(size) * 1073741824
stg_type = get_xml_path(stg.XMLDesc(0), "/pool/@type")
if stg_type == 'dir':
name += '.img'
alloc = 0
else:
alloc = size
xml = """
<volume>
<name>%s</name>
<capacity>%s</capacity>
<allocation>%s</allocation>
<target>
<format type='%s'/>
</target>
</volume>""" % (name, size, alloc, format)
stg.createXML(xml, 0)
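    # Illustrative call (not part of the original source): create a 10 GiB qcow2
    # volume named 'disk1.img' in a hypothetical dir-backed pool called 'default'.
    #
    #   conn.new_volume('default', 'disk1', 10)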
def clone_volume(self, storage, img, new_img, format=None):
"""
        Clone a volume within the storage pool.
"""
stg = self.storagePool(storage)
stg_type = get_xml_path(stg.XMLDesc(0), "/pool/@type")
if stg_type == 'dir':
new_img += '.img'
vol = stg.storageVolLookupByName(img)
if not format:
xml = vol.XMLDesc(0)
format = get_xml_path(xml, "/volume/target/format/@type")
xml = """
<volume>
<name>%s</name>
<capacity>0</capacity>
<allocation>0</allocation>
<target>
<format type='%s'/>
</target>
</volume>""" % (new_img, format)
stg.createXMLFrom(xml, vol, 0)
def images_get_storages(self, storages):
"""
        Return all images across all storage pools.
"""
disk = []
for storage in storages:
stg = self.storagePool(storage)
if stg.info()[0] != 0:
stg.refresh(0)
for img in stg.listVolumes():
if re.findall(".img", img):
disk.append(img)
return disk
def image_get_path(self, vol, storages):
"""
        Return the volume path.
"""
for storage in storages:
stg = self.storagePool(storage)
for img in stg.listVolumes():
if vol == img:
stg_volume = stg.storageVolLookupByName(vol)
return stg_volume.path()
def storage_get_info(self, storage):
"""
        Return storage pool info.
"""
stg = self.storagePool(storage)
if stg:
if stg.info()[3] == 0:
percent = 0
else:
percent = (stg.info()[2] * 100) / stg.info()[1]
info = stg.info()[1:4]
info.append(int(percent))
info.append(stg.isActive())
xml = stg.XMLDesc(0)
info.append(get_xml_path(xml, "/pool/@type"))
info.append(get_xml_path(xml, "/pool/target/path"))
else:
info = [None] * 7
return info
def new_storage_pool(self, type_pool, name, source, target):
"""
        Create a storage pool.
"""
xml = """
<pool type='%s'>
<name>%s</name>""" % (type_pool, name)
if type_pool == 'logical':
xml += """
<source>
<device path='%s'/>
<name>%s</name>
<format type='lvm2'/>
</source>""" % (source, name)
if type_pool == 'logical':
target = '/dev/' + name
xml += """
<target>
<path>%s</path>
</target>
</pool>""" % target
self.conn.storagePoolDefineXML(xml, 0)
stg = self.storagePool(name)
if type_pool == 'logical':
stg.build(0)
stg.create(0)
stg.setAutostart(1)
def volumes_get_info(self, storage):
"""
        Return volume info.
"""
stg = self.storagePool(storage)
volume_info = {}
for name in stg.listVolumes():
if re.findall(".img", name) or re.findall(".iso", name):
vol = stg.storageVolLookupByName(name)
xml = vol.XMLDesc(0)
size = vol.info()[1]
volume_format = get_xml_path(xml, "/volume/target/format/@type")
volume_info[name] = size, volume_format
return volume_info
def new_network_pool(self, name, forward, gateway, mask, dhcp, bridge_name):
"""
        Create a network pool.
"""
xml = """
<network>
<name>%s</name>""" % name
if forward in ['nat', 'route', 'bridge']:
xml += """<forward mode='%s'/>""" % forward
xml += """<bridge """
if forward in ['nat', 'route', 'none']:
xml += """stp='on' delay='0'"""
if forward == 'bridge':
xml += """name='%s'""" % bridge_name
xml += """/>"""
if forward != 'bridge':
xml += """
<ip address='%s' netmask='%s'>""" % (gateway, mask)
if dhcp:
xml += """<dhcp>
<range start='%s' end='%s' />
</dhcp>""" % (dhcp[0], dhcp[1])
xml += """</ip>"""
xml += """</network>"""
self.conn.networkDefineXML(xml)
net = self.networkPool(name)
net.create()
net.setAutostart(1)
def network_get_info(self, network):
"""
        Return network info.
"""
info = []
net = self.networkPool(network)
if net:
info.append(net.isActive())
info.append(net.bridgeName())
else:
info = [None] * 2
return info
def network_get_subnet(self, network):
"""
        Return virtual network info: forward type, ip, netmask, dhcp range.
"""
net = self.networkPool(network)
xml_net = net.XMLDesc(0)
ipv4 = []
fw_type = get_xml_path(xml_net, "/network/forward/@mode")
fw_dev = get_xml_path(xml_net, "/network/forward/@dev")
if fw_type:
ipv4.append([fw_type, fw_dev])
else:
ipv4.append(None)
# Subnet block
addr_str = get_xml_path(xml_net, "/network/ip/@address")
mask_str = get_xml_path(xml_net, "/network/ip/@netmask")
if addr_str and mask_str:
netmask = IP(mask_str)
gateway = IP(addr_str)
network = IP(gateway.int() & netmask.int())
ipv4.append(IP(str(network) + "/" + mask_str))
else:
ipv4.append(None)
# DHCP block
dhcp_start = get_xml_path(xml_net, "/network/ip/dhcp/range[1]/@start")
dhcp_end = get_xml_path(xml_net, "/network/ip/dhcp/range[1]/@end")
if not dhcp_start or not dhcp_end:
pass
else:
ipv4.append([IP(dhcp_start), IP(dhcp_end)])
return ipv4
def snapshots_get_node(self):
"""
        Return all snapshots on the node.
"""
vname = {}
for vm_id in self.conn.listDomainsID():
vm_id = int(vm_id)
dom = self.conn.lookupByID(vm_id)
if dom.snapshotNum(0) != 0:
vname[dom.name()] = dom.info()[0]
for name in self.conn.listDefinedDomains():
dom = self.lookupVM(name)
if dom.snapshotNum(0) != 0:
vname[dom.name()] = dom.info()[0]
return vname
def snapshots_get_vds(self, vname):
"""
        Return all snapshots of the given vds.
"""
snapshots = {}
dom = self.lookupVM(vname)
all_snapshot = dom.snapshotListNames(0)
for snapshot in all_snapshot:
snapshots[snapshot] = (datetime.fromtimestamp(int(snapshot)), dom.info()[0])
return snapshots
def snapshot_delete(self, vname, name_snap):
"""
        Delete a vds snapshot.
"""
dom = self.lookupVM(vname)
snap = dom.snapshotLookupByName(name_snap, 0)
snap.delete(0)
def snapshot_revert(self, vname, name_snap):
"""
        Revert a vds to a snapshot.
"""
dom = self.lookupVM(vname)
snap = dom.snapshotLookupByName(name_snap, 0)
dom.revertToSnapshot(snap, 0)
def vnc_get_port(self, vname):
"""
        Return the VNC port of the vds.
"""
dom = self.lookupVM(vname)
port = get_xml_path(dom.XMLDesc(0), "/domain/devices/graphics/@port")
return port
def vds_mount_iso(self, vname, image):
"""
        Mount an iso image on the vds. Changes the XML config.
"""
storages = self.storages_get_node()
dom = self.lookupVM(vname)
for storage in storages:
stg = self.storagePool(storage)
for img in stg.listVolumes():
if image == img:
if dom.info()[0] == 1:
vol = stg.storageVolLookupByName(image)
xml = """<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<target dev='sda' bus='ide'/>
<source file='%s'/>
</disk>""" % vol.path()
dom.attachDevice(xml)
xmldom = dom.XMLDesc(VIR_DOMAIN_XML_SECURE)
self.conn.defineXML(xmldom)
if dom.info()[0] == 5:
vol = stg.storageVolLookupByName(image)
xml = dom.XMLDesc(VIR_DOMAIN_XML_SECURE)
newxml = "<disk type='file' device='cdrom'>\n <driver name='qemu' type='raw'/>\n <source file='%s'/>" % vol.path()
xmldom = xml.replace(
"<disk type='file' device='cdrom'>\n <driver name='qemu' type='raw'/>", newxml)
self.conn.defineXML(xmldom)
def vds_umount_iso(self, vname, image):
"""
        Unmount the iso image from the vds. Changes the XML config.
"""
dom = self.lookupVM(vname)
if dom.info()[0] == 1:
xml = """<disk type='file' device='cdrom'>
<driver name="qemu" type='raw'/>
<target dev='sda' bus='ide'/>
<readonly/>
</disk>"""
dom.attachDevice(xml)
xmldom = dom.XMLDesc(VIR_DOMAIN_XML_SECURE)
self.conn.defineXML(xmldom)
if dom.info()[0] == 5:
xml = dom.XMLDesc(VIR_DOMAIN_XML_SECURE)
xmldom = xml.replace("<source file='%s'/>\n" % image, '')
self.conn.defineXML(xmldom)
def vds_cpu_usage(self, vname):
"""
        Return vds cpu usage.
"""
dom = self.lookupVM(vname)
if dom.info()[0] == 1:
nbcore = self.conn.getInfo()[2]
cpu_use_ago = dom.info()[4]
time.sleep(1)
cpu_use_now = dom.info()[4]
diff_usage = cpu_use_now - cpu_use_ago
cpu_usage = 100 * diff_usage / (1 * nbcore * 10 ** 9L)
else:
cpu_usage = 0
return cpu_usage
def vds_memory_usage(self, vname):
"""
        Return vds memory usage.
"""
dom = self.lookupVM(vname)
allmem = self.conn.getInfo()[1] * 1048576
if dom.info()[0] == 1:
dom_mem = dom.info()[1] * 1024
percent = (dom_mem * 100) / allmem
else:
percent = 0
return allmem, percent
def vds_get_info(self, vname):
"""
        Return vds info: vcpu, memory, networks, description.
"""
info = []
dom = self.lookupVM(vname)
xml = dom.XMLDesc(0)
info.append(get_xml_path(xml, "/domain/vcpu"))
mem = get_xml_path(xml, "/domain/memory")
mem = int(mem) / 1024
info.append(int(mem))
def get_networks(ctx):
result = []
for interface in ctx.xpathEval('/domain/devices/interface'):
mac = interface.xpathEval('mac/@address')[0].content
nic = interface.xpathEval('source/@network|source/@bridge')[0].content
result.append({'mac': mac, 'nic': nic})
return result
info.append(get_xml_path(xml, func=get_networks))
description = get_xml_path(xml, "/domain/description")
info.append(description)
return info
def vds_get_hdd(self, vname):
"""
        Return vds hdd info.
"""
all_hdd_dev = {}
storages = self.storages_get_node()
dom = self.lookupVM(vname)
xml = dom.XMLDesc(0)
for num in range(1, 5):
hdd_dev = get_xml_path(xml, "/domain/devices/disk[%s]/@device" % (num))
if hdd_dev == 'disk':
dev_bus = get_xml_path(xml, "/domain/devices/disk[%s]/target/@dev" % (num))
hdd = get_xml_path(xml, "/domain/devices/disk[%s]/source/@file" % (num))
                # If the XML was created manually, the disk source may be a
                # device path instead of a file
if not hdd:
hdd = get_xml_path(xml, "/domain/devices/disk[%s]/source/@dev" % (num))
try:
img = self.storageVolPath(hdd)
img_vol = img.name()
for storage in storages:
stg = self.storagePool(storage)
if stg.info()[0] != 0:
stg.refresh(0)
for img in stg.listVolumes():
if img == img_vol:
vol = img
vol_stg = storage
all_hdd_dev[dev_bus] = vol, vol_stg
except:
all_hdd_dev[dev_bus] = hdd, 'Not in the pool'
return all_hdd_dev
def vds_get_media(self, vname):
"""
        Return vds media info.
"""
dom = self.lookupVM(vname)
xml = dom.XMLDesc(0)
for num in range(1, 5):
hdd_dev = get_xml_path(xml, "/domain/devices/disk[%s]/@device" % (num))
if hdd_dev == 'cdrom':
media = get_xml_path(xml, "/domain/devices/disk[%s]/source/@file" % (num))
if media:
try:
vol = self.storageVolPath(media)
return vol.name(), vol.path()
except:
return media, media
else:
return None, None
return None, None
def vds_set_vnc_passwd(self, vname, passwd):
"""
        Set the VNC password for the vds.
"""
dom = self.lookupVM(vname)
xml = dom.XMLDesc(VIR_DOMAIN_XML_SECURE)
find_tag = re.findall('<graphics.*/>', xml)
if find_tag:
close_tag = '/'
else:
close_tag = ''
newxml = "<graphics type='vnc' passwd='%s'%s>" % (passwd, close_tag)
xmldom = re.sub('<graphics.*>', newxml, xml)
self.conn.defineXML(xmldom)
def vds_edit(self, vname, description, ram, vcpu):
"""
        Change the description, ram and vcpu of the vds.
"""
dom = self.lookupVM(vname)
xml = dom.XMLDesc(VIR_DOMAIN_XML_SECURE)
memory = int(ram) * 1024
xml_memory = "<memory unit='KiB'>%s</memory>" % memory
xml_memory_change = re.sub('<memory.*memory>', xml_memory, xml)
xml_curmemory = "<currentMemory unit='KiB'>%s</currentMemory>" % memory
xml_curmemory_change = re.sub('<currentMemory.*currentMemory>', xml_curmemory, xml_memory_change)
xml_vcpu = "<vcpu>%s</vcpu>" % vcpu
xml_vcpu_change = re.sub('<vcpu.*vcpu>', xml_vcpu, xml_curmemory_change)
xml_description = "<description>%s</description>" % description
xml_description_change = re.sub('<description.*description>', xml_description, xml_vcpu_change)
self.conn.defineXML(xml_description_change)
def defineXML(self, xml):
"""
        Define a VM from an XML config.
"""
self.conn.defineXML(xml)
def get_all_media(self):
"""
        Return all iso media on all storage pools.
"""
iso = []
storages = self.storages_get_node()
for storage in storages:
stg = self.storagePool(storage)
if stg.info()[0] != 0:
stg.refresh(0)
for img in stg.listVolumes():
if re.findall(".iso", img):
iso.append(img)
return iso
def vds_remove_hdd(self, vname):
"""
        Delete the vds hdd.
"""
dom = self.lookupVM(vname)
img = get_xml_path(dom.XMLDesc(0), "/domain/devices/disk[1]/source/@file")
vol = self.storageVolPath(img)
vol.delete(0)
def vds_create_snapshot(self, vname):
"""
        Create a vds snapshot.
"""
dom = self.lookupVM(vname)
xml = """<domainsnapshot>\n
<name>%d</name>\n
<state>shutoff</state>\n
<creationTime>%d</creationTime>\n""" % (time.time(), time.time())
xml += dom.XMLDesc(VIR_DOMAIN_XML_SECURE)
xml += """<active>0</active>\n
</domainsnapshot>"""
dom.snapshotCreateXML(xml, 0)
def vds_on_cluster(self):
"""
        Show all vds on the host with state, vcpu and memory usage.
"""
vname = {}
host_mem = self.conn.getInfo()[1] * 1048576
for vm_id in self.conn.listDomainsID():
vm_id = int(vm_id)
dom = self.conn.lookupByID(vm_id)
mem = get_xml_path(dom.XMLDesc(0), "/domain/memory")
mem = int(mem) * 1024
mem_usage = (mem * 100) / host_mem
vcpu = get_xml_path(dom.XMLDesc(0), "/domain/vcpu")
vname[dom.name()] = (dom.info()[0], vcpu, mem, mem_usage)
for name in self.conn.listDefinedDomains():
dom = self.lookupVM(name)
mem = get_xml_path(dom.XMLDesc(0), "/domain/memory")
mem = int(mem) * 1024
mem_usage = (mem * 100) / host_mem
vcpu = get_xml_path(dom.XMLDesc(0), "/domain/vcpu")
vname[dom.name()] = (dom.info()[0], vcpu, mem, mem_usage)
return vname
def close(self):
"""
Close libvirt connection.
"""
self.conn.close()
| apache-2.0 | -4,393,413,968,680,874,500 | 29.800201 | 148 | 0.478991 | false |
mitodl/open-discussions | open_discussions/authentication.py | 1 | 2405 | """Custom authentication for DRF"""
import logging
from django.contrib.auth import get_user_model
import jwt
from rest_framework.authentication import BaseAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
User = get_user_model()
HEADER_PREFIX = "Token "
HEADER_PREFIX_LENGTH = len(HEADER_PREFIX)
logger = logging.getLogger()
class IgnoreExpiredJwtAuthentication(JSONWebTokenAuthentication):
"""Version of JSONWebTokenAuthentication that ignores JWT values if they're expired"""
def get_jwt_value(self, request):
"""Returns the JWT values as long as it's not expired"""
value = super().get_jwt_value(request)
try:
# try to decode the value just to see if it's expired
from rest_framework_jwt.settings import api_settings
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
jwt_decode_handler(value)
except jwt.ExpiredSignature:
# if it is expired, treat it as if the user never passed a token
logger.debug("Ignoring expired JWT")
return None
except: # pylint: disable=bare-except
# we're only interested in jwt.ExpiredSignature above
# exception handling in general is already handled in the base class
pass
return value
class StatelessTokenAuthentication(BaseAuthentication):
"""
Stateless authentication via a authorization token
NOTE: this is a highly trusting version of authentication and should only be
used for certain things such as email unsubscribes
"""
def authenticate(self, request):
"""
Attempts to authenticate using a stateless token
"""
from open_discussions.auth_utils import unsign_and_verify_username_from_token
if "HTTP_AUTHORIZATION" in request.META:
header_value = request.META["HTTP_AUTHORIZATION"]
if not header_value.startswith(HEADER_PREFIX):
return None
token = header_value[HEADER_PREFIX_LENGTH:]
username = unsign_and_verify_username_from_token(token)
if not username:
return None
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return None
return (user, None)
return None
| bsd-3-clause | -7,893,392,934,490,459,000 | 30.233766 | 90 | 0.656133 | false |
UnitedThruAction/Data | Tools/StandardizeAddress.py | 1 | 4188 | """Standardize a list of addresses using the USPS API.
Multi-threaded, since the API response time is slow.
Get an API key at https://registration.shippingapis.com.
"""
from __future__ import print_function
import threading
import sys
import pandas as pd
from tqdm import tqdm
from collections import deque
from pyusps import address_information
NUM_THREADS = 100
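# Typical usage (the column names below are only illustrative, they are not
# part of this module):
#
#   df = pd.read_csv('addresses.csv')
#   standardize_address(df, type='raw', col1='ADDRESS', col2='CITY',
#                       key='ID', usps_key='YOUR_USPS_API_KEY')
#   df['standardized_address']  # now holds the USPS-normalised strings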
def standardize_address(
df,
type='vf',
col1=None,
col2=None,
key=None,
usps_key=None,
new_col='standardized_address'):
"""Standardize a list of addresses using the USPS API.
Arguments:
df: a DataFrame of data
type: 'vf' (NY State Voter File)
or 'raw', two columns
col1: if using 'raw', column name for first line of address
col2: if using 'raw', column name for second line of address
        key: if using 'raw', column name for the key to look up on
usps_key: USPS API key
new_col: name of new column to add."""
threads = deque()
results = {}
for obj in tqdm(df.iterrows(), total=df.shape[0]):
row = obj[1]
        if len(threads) >= NUM_THREADS:
            # Wait for the oldest worker before dispatching this row, so no
            # row is silently skipped once the pool is full.
            t = threads.popleft()
            t.join()
        if type == 'vf':
            t = threading.Thread(
                target=vf_standardize_address, args=(
                    row, results, usps_key))
        elif type == 'raw':
            t = threading.Thread(
                target=gen_standardize_address, args=(
                    row[col1], row[col2], row[key], results, usps_key))
        else:
            raise Exception("type not recognized")
        t.start()
        threads.append(t)
while threads:
t = threads.popleft()
t.join()
sys.stderr.flush()
sys.stdout.flush()
if type == 'vf':
df[new_col] = df['SBOEID'].map(results)
elif type == 'raw':
df[new_col] = df[key].map(results)
def vf_standardize_address(row, results, usps_key):
"""Used for the NY State Voter File only."""
rhalfcode = '' if pd.isnull(row['RHALFCODE']) else row['RHALFCODE']
raddnumber = '' if pd.isnull(row['RADDNUMBER']) else row['RADDNUMBER']
rpredirection = '' if pd.isnull(
row['RPREDIRECTION']) else row['RPREDIRECTION']
rstreetname = '' if pd.isnull(row['RSTREETNAME']) else row['RSTREETNAME']
rpostdirection = '' if pd.isnull(
row['RPOSTDIRECTION']) else row['RPOSTDIRECTION']
rapartment = '' if pd.isnull(row['RAPARTMENT']) else row['RAPARTMENT']
if ('APT' in str(row['RAPARTMENT']).upper()) \
or ('UNIT' in str(row['RAPARTMENT']).upper()) \
or (row['RAPARTMENT'] == ''):
address = "{} {} {} {} {} {}".format(
raddnumber,
rhalfcode,
rpredirection,
rstreetname,
rpostdirection,
rapartment)
else:
address = "{} {} {} {} {} APT {}".format(
raddnumber,
rhalfcode,
rpredirection,
rstreetname,
rpostdirection,
rapartment)
try:
address = address.upper()
addr = {'address': address, 'city': row['RCITY'], 'state': 'NY'}
result = address_information.verify(usps_key, addr)
zip4 = "-{}".format(result['zip4']) if result['zip4'] else ''
results[row['SBOEID']] = "{}, {} {} {}{}".format(
result['address'], result['city'], result['state'], result['zip5'], zip4)
except Exception:
results[row['SBOEID']] = address
def gen_standardize_address(addr1, addr2, key, results, usps_key):
addr = {'address': addr1, 'city': addr2, 'state': 'NY'}
try:
result = address_information.verify(usps_key, addr)
zip4 = "-{}".format(result['zip4']) if ('zip4' in result) and result['zip4'] else ''
results[key] = "{}, {} {} {}{}".format(
result['address'],
result['city'],
result['state'],
result['zip5'],
zip4)
except Exception as e:
results[key] = "{}, {}".format(addr1, addr2)
| apache-2.0 | 227,579,348,872,957,340 | 32.504 | 92 | 0.546323 | false |
pombredanne/pytype | pytype/pyc/opcodes_test.py | 1 | 52430 | from pytype.pyc import opcodes
import unittest
class Python2Test(unittest.TestCase):
"""Test bytecodes.dis for Python 2 opcodes."""
PYTHON_VERSION = (2, 7, 6)
def dis(self, data):
return opcodes.dis(data, self.PYTHON_VERSION)
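  # The byte strings handed to dis() are raw CPython bytecode: a one-byte
  # opcode, followed by a two-byte little-endian argument for opcodes that
  # take one.  Jump/loop tests append a trailing '\t' (NOP) so the decoded
  # jump target points at a real instruction.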
def test_stop_code(self):
self.assertEquals(self.dis('\x00')[0].name, 'STOP_CODE')
def test_pop_top(self):
self.assertEquals(self.dis('\x01')[0].name, 'POP_TOP')
def test_rot_two(self):
self.assertEquals(self.dis('\x02')[0].name, 'ROT_TWO')
def test_rot_three(self):
self.assertEquals(self.dis('\x03')[0].name, 'ROT_THREE')
def test_dup_top(self):
self.assertEquals(self.dis('\x04')[0].name, 'DUP_TOP')
def test_rot_four(self):
self.assertEquals(self.dis('\x05')[0].name, 'ROT_FOUR')
def test_nop(self):
self.assertEquals(self.dis('\t')[0].name, 'NOP')
def test_unary_positive(self):
self.assertEquals(self.dis('\n')[0].name, 'UNARY_POSITIVE')
def test_unary_negative(self):
self.assertEquals(self.dis('\x0b')[0].name, 'UNARY_NEGATIVE')
def test_unary_not(self):
self.assertEquals(self.dis('\x0c')[0].name, 'UNARY_NOT')
def test_unary_convert(self):
self.assertEquals(self.dis('\r')[0].name, 'UNARY_CONVERT')
def test_unary_invert(self):
self.assertEquals(self.dis('\x0f')[0].name, 'UNARY_INVERT')
def test_binary_power(self):
self.assertEquals(self.dis('\x13')[0].name, 'BINARY_POWER')
def test_binary_multiply(self):
self.assertEquals(self.dis('\x14')[0].name, 'BINARY_MULTIPLY')
def test_binary_divide(self):
self.assertEquals(self.dis('\x15')[0].name, 'BINARY_DIVIDE')
def test_binary_modulo(self):
self.assertEquals(self.dis('\x16')[0].name, 'BINARY_MODULO')
def test_binary_add(self):
self.assertEquals(self.dis('\x17')[0].name, 'BINARY_ADD')
def test_binary_subtract(self):
self.assertEquals(self.dis('\x18')[0].name, 'BINARY_SUBTRACT')
def test_binary_subscr(self):
self.assertEquals(self.dis('\x19')[0].name, 'BINARY_SUBSCR')
def test_binary_floor_divide(self):
self.assertEquals(self.dis('\x1a')[0].name, 'BINARY_FLOOR_DIVIDE')
def test_binary_true_divide(self):
self.assertEquals(self.dis('\x1b')[0].name, 'BINARY_TRUE_DIVIDE')
def test_inplace_floor_divide(self):
self.assertEquals(self.dis('\x1c')[0].name, 'INPLACE_FLOOR_DIVIDE')
def test_inplace_true_divide(self):
self.assertEquals(self.dis('\x1d')[0].name, 'INPLACE_TRUE_DIVIDE')
def test_slice_0(self):
self.assertEquals(self.dis('\x1e')[0].name, 'SLICE_0')
def test_slice_1(self):
self.assertEquals(self.dis('\x1f')[0].name, 'SLICE_1')
def test_slice_2(self):
self.assertEquals(self.dis(' ')[0].name, 'SLICE_2')
def test_slice_3(self):
self.assertEquals(self.dis('!')[0].name, 'SLICE_3')
def test_store_slice_0(self):
self.assertEquals(self.dis('(')[0].name, 'STORE_SLICE_0')
def test_store_slice_1(self):
self.assertEquals(self.dis(')')[0].name, 'STORE_SLICE_1')
def test_store_slice_2(self):
self.assertEquals(self.dis('*')[0].name, 'STORE_SLICE_2')
def test_store_slice_3(self):
self.assertEquals(self.dis('+')[0].name, 'STORE_SLICE_3')
def test_delete_slice_0(self):
self.assertEquals(self.dis('2')[0].name, 'DELETE_SLICE_0')
def test_delete_slice_1(self):
self.assertEquals(self.dis('3')[0].name, 'DELETE_SLICE_1')
def test_delete_slice_2(self):
self.assertEquals(self.dis('4')[0].name, 'DELETE_SLICE_2')
def test_delete_slice_3(self):
self.assertEquals(self.dis('5')[0].name, 'DELETE_SLICE_3')
def test_store_map(self):
self.assertEquals(self.dis('6')[0].name, 'STORE_MAP')
def test_inplace_add(self):
self.assertEquals(self.dis('7')[0].name, 'INPLACE_ADD')
def test_inplace_subtract(self):
self.assertEquals(self.dis('8')[0].name, 'INPLACE_SUBTRACT')
def test_inplace_multiply(self):
self.assertEquals(self.dis('9')[0].name, 'INPLACE_MULTIPLY')
def test_inplace_divide(self):
self.assertEquals(self.dis(':')[0].name, 'INPLACE_DIVIDE')
def test_inplace_modulo(self):
self.assertEquals(self.dis(';')[0].name, 'INPLACE_MODULO')
def test_store_subscr(self):
self.assertEquals(self.dis('<')[0].name, 'STORE_SUBSCR')
def test_delete_subscr(self):
self.assertEquals(self.dis('=')[0].name, 'DELETE_SUBSCR')
def test_binary_lshift(self):
self.assertEquals(self.dis('>')[0].name, 'BINARY_LSHIFT')
def test_binary_rshift(self):
self.assertEquals(self.dis('?')[0].name, 'BINARY_RSHIFT')
def test_binary_and(self):
self.assertEquals(self.dis('@')[0].name, 'BINARY_AND')
def test_binary_xor(self):
self.assertEquals(self.dis('A')[0].name, 'BINARY_XOR')
def test_binary_or(self):
self.assertEquals(self.dis('B')[0].name, 'BINARY_OR')
def test_inplace_power(self):
self.assertEquals(self.dis('C')[0].name, 'INPLACE_POWER')
def test_get_iter(self):
self.assertEquals(self.dis('D')[0].name, 'GET_ITER')
def test_print_expr(self):
self.assertEquals(self.dis('F')[0].name, 'PRINT_EXPR')
def test_print_item(self):
self.assertEquals(self.dis('G')[0].name, 'PRINT_ITEM')
def test_print_newline(self):
self.assertEquals(self.dis('H')[0].name, 'PRINT_NEWLINE')
def test_print_item_to(self):
self.assertEquals(self.dis('I')[0].name, 'PRINT_ITEM_TO')
def test_print_newline_to(self):
self.assertEquals(self.dis('J')[0].name, 'PRINT_NEWLINE_TO')
def test_inplace_lshift(self):
self.assertEquals(self.dis('K')[0].name, 'INPLACE_LSHIFT')
def test_inplace_rshift(self):
self.assertEquals(self.dis('L')[0].name, 'INPLACE_RSHIFT')
def test_inplace_and(self):
self.assertEquals(self.dis('M')[0].name, 'INPLACE_AND')
def test_inplace_xor(self):
self.assertEquals(self.dis('N')[0].name, 'INPLACE_XOR')
def test_inplace_or(self):
self.assertEquals(self.dis('O')[0].name, 'INPLACE_OR')
def test_break_loop(self):
self.assertEquals(self.dis('P')[0].name, 'BREAK_LOOP')
def test_with_cleanup(self):
self.assertEquals(self.dis('Q')[0].name, 'WITH_CLEANUP')
def test_load_locals(self):
self.assertEquals(self.dis('R')[0].name, 'LOAD_LOCALS')
def test_return_value(self):
self.assertEquals(self.dis('S')[0].name, 'RETURN_VALUE')
def test_import_star(self):
self.assertEquals(self.dis('T')[0].name, 'IMPORT_STAR')
def test_exec_stmt(self):
self.assertEquals(self.dis('U')[0].name, 'EXEC_STMT')
def test_yield_value(self):
self.assertEquals(self.dis('V')[0].name, 'YIELD_VALUE')
def test_pop_block(self):
self.assertEquals(self.dis('W')[0].name, 'POP_BLOCK')
def test_end_finally(self):
self.assertEquals(self.dis('X')[0].name, 'END_FINALLY')
def test_build_class(self):
self.assertEquals(self.dis('Y')[0].name, 'BUILD_CLASS')
def test_store_name(self):
self.assertEquals(self.dis('Z\x00\x00')[0].name, 'STORE_NAME')
def test_delete_name(self):
self.assertEquals(self.dis('[\x00\x00')[0].name, 'DELETE_NAME')
def test_unpack_sequence(self):
self.assertEquals(self.dis('\\\x00\x00')[0].name, 'UNPACK_SEQUENCE')
def test_for_iter(self):
self.assertEquals(self.dis(']\x00\x00\t')[0].name, 'FOR_ITER')
def test_list_append(self):
self.assertEquals(self.dis('^\x00\x00')[0].name, 'LIST_APPEND')
def test_store_attr(self):
self.assertEquals(self.dis('_\x00\x00')[0].name, 'STORE_ATTR')
def test_delete_attr(self):
self.assertEquals(self.dis('`\x00\x00')[0].name, 'DELETE_ATTR')
def test_store_global(self):
self.assertEquals(self.dis('a\x00\x00')[0].name, 'STORE_GLOBAL')
def test_delete_global(self):
self.assertEquals(self.dis('b\x00\x00')[0].name, 'DELETE_GLOBAL')
def test_dup_topx(self):
self.assertEquals(self.dis('c\x00\x00')[0].name, 'DUP_TOPX')
def test_load_const(self):
self.assertEquals(self.dis('d\x00\x00')[0].name, 'LOAD_CONST')
def test_load_name(self):
self.assertEquals(self.dis('e\x00\x00')[0].name, 'LOAD_NAME')
def test_build_tuple(self):
self.assertEquals(self.dis('f\x00\x00')[0].name, 'BUILD_TUPLE')
def test_build_list(self):
self.assertEquals(self.dis('g\x00\x00')[0].name, 'BUILD_LIST')
def test_build_set(self):
self.assertEquals(self.dis('h\x00\x00')[0].name, 'BUILD_SET')
def test_build_map(self):
self.assertEquals(self.dis('i\x00\x00')[0].name, 'BUILD_MAP')
def test_load_attr(self):
self.assertEquals(self.dis('j\x00\x00')[0].name, 'LOAD_ATTR')
def test_compare_op(self):
self.assertEquals(self.dis('k\x00\x00')[0].name, 'COMPARE_OP')
def test_import_name(self):
self.assertEquals(self.dis('l\x00\x00')[0].name, 'IMPORT_NAME')
def test_import_from(self):
self.assertEquals(self.dis('m\x00\x00')[0].name, 'IMPORT_FROM')
def test_jump_forward(self):
self.assertEquals(self.dis('n\x00\x00\t')[0].name, 'JUMP_FORWARD')
def test_jump_if_false_or_pop(self):
self.assertEquals(self.dis('o\x03\x00\t')[0].name, 'JUMP_IF_FALSE_OR_POP')
def test_jump_if_true_or_pop(self):
self.assertEquals(self.dis('p\x03\x00\t')[0].name, 'JUMP_IF_TRUE_OR_POP')
def test_jump_absolute(self):
self.assertEquals(self.dis('q\x03\x00\t')[0].name, 'JUMP_ABSOLUTE')
def test_pop_jump_if_false(self):
self.assertEquals(self.dis('r\x03\x00\t')[0].name, 'POP_JUMP_IF_FALSE')
def test_pop_jump_if_true(self):
self.assertEquals(self.dis('s\x03\x00\t')[0].name, 'POP_JUMP_IF_TRUE')
def test_load_global(self):
self.assertEquals(self.dis('t\x00\x00')[0].name, 'LOAD_GLOBAL')
def test_continue_loop(self):
self.assertEquals(self.dis('w\x03\x00\t')[0].name, 'CONTINUE_LOOP')
def test_setup_loop(self):
self.assertEquals(self.dis('x\x00\x00\t')[0].name, 'SETUP_LOOP')
def test_setup_except(self):
self.assertEquals(self.dis('y\x00\x00\t')[0].name, 'SETUP_EXCEPT')
def test_setup_finally(self):
self.assertEquals(self.dis('z\x00\x00\t')[0].name, 'SETUP_FINALLY')
def test_load_fast(self):
self.assertEquals(self.dis('|\x00\x00')[0].name, 'LOAD_FAST')
def test_store_fast(self):
self.assertEquals(self.dis('}\x00\x00')[0].name, 'STORE_FAST')
def test_delete_fast(self):
self.assertEquals(self.dis('~\x00\x00')[0].name, 'DELETE_FAST')
def test_raise_varargs(self):
self.assertEquals(self.dis('\x82\x00\x00')[0].name, 'RAISE_VARARGS')
def test_call_function(self):
self.assertEquals(self.dis('\x83\x00\x00')[0].name, 'CALL_FUNCTION')
def test_make_function(self):
self.assertEquals(self.dis('\x84\x00\x00')[0].name, 'MAKE_FUNCTION')
def test_build_slice(self):
self.assertEquals(self.dis('\x85\x00\x00')[0].name, 'BUILD_SLICE')
def test_make_closure(self):
self.assertEquals(self.dis('\x86\x00\x00')[0].name, 'MAKE_CLOSURE')
def test_load_closure(self):
self.assertEquals(self.dis('\x87\x00\x00')[0].name, 'LOAD_CLOSURE')
def test_load_deref(self):
self.assertEquals(self.dis('\x88\x00\x00')[0].name, 'LOAD_DEREF')
def test_store_deref(self):
self.assertEquals(self.dis('\x89\x00\x00')[0].name, 'STORE_DEREF')
def test_call_function_var(self):
self.assertEquals(self.dis('\x8c\x00\x00')[0].name, 'CALL_FUNCTION_VAR')
def test_call_function_kw(self):
self.assertEquals(self.dis('\x8d\x00\x00')[0].name, 'CALL_FUNCTION_KW')
def test_call_function_var_kw(self):
self.assertEquals(self.dis('\x8e\x00\x00')[0].name, 'CALL_FUNCTION_VAR_KW')
def test_setup_with(self):
self.assertEquals(self.dis('\x8f\x00\x00\t')[0].name, 'SETUP_WITH')
def test_set_add(self):
self.assertEquals(self.dis('\x92\x00\x00')[0].name, 'SET_ADD')
def test_map_add(self):
self.assertEquals(self.dis('\x93\x00\x00')[0].name, 'MAP_ADD')
def test_binary(self):
code = ''.join(chr(c) for c in ([
0x7c, 0, 0, # 0 LOAD_FAST, arg=0,
0x7c, 0, 0, # 3 LOAD_FAST, arg=0,
0x17, # 6 BINARY_ADD,
0x01, # 7 POP_TOP,
0x7c, 0, 0, # 8 LOAD_FAST, arg=0,
0x7c, 0, 0, # 11 LOAD_FAST, arg=0,
0x14, # 14 BINARY_MULTIPLY,
0x01, # 15 POP_TOP,
0x7c, 0, 0, # 16 LOAD_FAST, arg=0,
0x7c, 0, 0, # 19 LOAD_FAST, arg=0,
0x16, # 22 BINARY_MODULO,
0x01, # 23 POP_TOP,
0x7c, 0, 0, # 24 LOAD_FAST, arg=0,
0x7c, 0, 0, # 27 LOAD_FAST, arg=0,
0x15, # 30 BINARY_DIVIDE,
0x01, # 31 POP_TOP,
0x64, 0, 0, # 32 LOAD_CONST, arg=0,
0x53, # 35 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 18)
self.assertEquals(ops[0].name, 'LOAD_FAST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_FAST')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'BINARY_ADD')
self.assertEquals(ops[3].name, 'POP_TOP')
self.assertEquals(ops[4].name, 'LOAD_FAST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'LOAD_FAST')
self.assertEquals(ops[5].arg, 0)
self.assertEquals(ops[6].name, 'BINARY_MULTIPLY')
self.assertEquals(ops[7].name, 'POP_TOP')
self.assertEquals(ops[8].name, 'LOAD_FAST')
self.assertEquals(ops[8].arg, 0)
self.assertEquals(ops[9].name, 'LOAD_FAST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'BINARY_MODULO')
self.assertEquals(ops[11].name, 'POP_TOP')
self.assertEquals(ops[12].name, 'LOAD_FAST')
self.assertEquals(ops[12].arg, 0)
self.assertEquals(ops[13].name, 'LOAD_FAST')
self.assertEquals(ops[13].arg, 0)
self.assertEquals(ops[14].name, 'BINARY_DIVIDE')
self.assertEquals(ops[15].name, 'POP_TOP')
self.assertEquals(ops[16].name, 'LOAD_CONST')
self.assertEquals(ops[16].arg, 0)
self.assertEquals(ops[17].name, 'RETURN_VALUE')
def test_break(self):
code = ''.join(chr(c) for c in ([
0x78, 4, 0, # 0 SETUP_LOOP, dest=7,
0x50, # 3 BREAK_LOOP,
0x71, 3, 0, # 4 JUMP_ABSOLUTE, dest=3,
0x64, 0, 0, # 7 LOAD_CONST, arg=0,
0x53, # 10 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 5)
self.assertEquals(ops[0].name, 'SETUP_LOOP')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'BREAK_LOOP')
self.assertEquals(ops[2].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[2].arg, 1)
self.assertEquals(ops[2].target, ops[1])
self.assertEquals(ops[3].name, 'LOAD_CONST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'RETURN_VALUE')
def test_call(self):
code = ''.join(chr(c) for c in ([
0x74, 0, 0, # 0 LOAD_GLOBAL, arg=0,
0x83, 0, 0, # 3 CALL_FUNCTION, arg=0,
0x01, # 6 POP_TOP,
0x64, 0, 0, # 7 LOAD_CONST, arg=0,
0x53, # 10 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 5)
self.assertEquals(ops[0].name, 'LOAD_GLOBAL')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'CALL_FUNCTION')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'POP_TOP')
self.assertEquals(ops[3].name, 'LOAD_CONST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'RETURN_VALUE')
def test_continue(self):
code = ''.join(chr(c) for c in ([
0x78, 6, 0, # 0 SETUP_LOOP, dest=9,
0x71, 3, 0, # 3 JUMP_ABSOLUTE, dest=3,
0x71, 3, 0, # 6 JUMP_ABSOLUTE, dest=3,
0x64, 0, 0, # 9 LOAD_CONST, arg=0,
0x53, # 12 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 5)
self.assertEquals(ops[0].name, 'SETUP_LOOP')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[1].arg, 1)
self.assertEquals(ops[1].target, ops[1])
self.assertEquals(ops[2].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[2].arg, 1)
self.assertEquals(ops[2].target, ops[1])
self.assertEquals(ops[3].name, 'LOAD_CONST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'RETURN_VALUE')
def test_except(self):
code = ''.join(chr(c) for c in ([
0x79, 4, 0, # 0 SETUP_EXCEPT, dest=7,
0x57, # 3 POP_BLOCK,
0x6e, 7, 0, # 4 JUMP_FORWARD, dest=14,
0x01, # 7 POP_TOP,
0x01, # 8 POP_TOP,
0x01, # 9 POP_TOP,
0x6e, 1, 0, # 10 JUMP_FORWARD, dest=14,
0x58, # 13 END_FINALLY,
0x64, 0, 0, # 14 LOAD_CONST, arg=0,
0x53, # 17 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 10)
self.assertEquals(ops[0].name, 'SETUP_EXCEPT')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'POP_BLOCK')
self.assertEquals(ops[2].name, 'JUMP_FORWARD')
self.assertEquals(ops[2].arg, 8)
self.assertEquals(ops[2].target, ops[8])
self.assertEquals(ops[3].name, 'POP_TOP')
self.assertEquals(ops[4].name, 'POP_TOP')
self.assertEquals(ops[5].name, 'POP_TOP')
self.assertEquals(ops[6].name, 'JUMP_FORWARD')
self.assertEquals(ops[6].arg, 8)
self.assertEquals(ops[6].target, ops[8])
self.assertEquals(ops[7].name, 'END_FINALLY')
self.assertEquals(ops[8].name, 'LOAD_CONST')
self.assertEquals(ops[8].arg, 0)
self.assertEquals(ops[9].name, 'RETURN_VALUE')
def test_finally(self):
code = ''.join(chr(c) for c in ([
0x7a, 4, 0, # 0 SETUP_FINALLY, dest=7,
0x57, # 3 POP_BLOCK,
0x64, 0, 0, # 4 LOAD_CONST, arg=0,
0x58, # 7 END_FINALLY,
0x64, 0, 0, # 8 LOAD_CONST, arg=0,
0x53, # 11 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 6)
self.assertEquals(ops[0].name, 'SETUP_FINALLY')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'POP_BLOCK')
self.assertEquals(ops[2].name, 'LOAD_CONST')
self.assertEquals(ops[2].arg, 0)
self.assertEquals(ops[3].name, 'END_FINALLY')
self.assertEquals(ops[4].name, 'LOAD_CONST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'RETURN_VALUE')
def test_inplace(self):
code = ''.join(chr(c) for c in ([
0x7c, 0, 0, # 0 LOAD_FAST, arg=0,
0x7c, 0, 0, # 3 LOAD_FAST, arg=0,
0x4b, # 6 INPLACE_LSHIFT,
0x7d, 0, 0, # 7 STORE_FAST, arg=0,
0x7c, 0, 0, # 10 LOAD_FAST, arg=0,
0x7c, 0, 0, # 13 LOAD_FAST, arg=0,
0x4c, # 16 INPLACE_RSHIFT,
0x7d, 0, 0, # 17 STORE_FAST, arg=0,
0x7c, 0, 0, # 20 LOAD_FAST, arg=0,
0x7c, 0, 0, # 23 LOAD_FAST, arg=0,
0x37, # 26 INPLACE_ADD,
0x7d, 0, 0, # 27 STORE_FAST, arg=0,
0x7c, 0, 0, # 30 LOAD_FAST, arg=0,
0x7c, 0, 0, # 33 LOAD_FAST, arg=0,
0x38, # 36 INPLACE_SUBTRACT,
0x7d, 0, 0, # 37 STORE_FAST, arg=0,
0x64, 0, 0, # 40 LOAD_CONST, arg=0,
0x53, # 43 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 18)
self.assertEquals(ops[0].name, 'LOAD_FAST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_FAST')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'INPLACE_LSHIFT')
self.assertEquals(ops[3].name, 'STORE_FAST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'LOAD_FAST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'LOAD_FAST')
self.assertEquals(ops[5].arg, 0)
self.assertEquals(ops[6].name, 'INPLACE_RSHIFT')
self.assertEquals(ops[7].name, 'STORE_FAST')
self.assertEquals(ops[7].arg, 0)
self.assertEquals(ops[8].name, 'LOAD_FAST')
self.assertEquals(ops[8].arg, 0)
self.assertEquals(ops[9].name, 'LOAD_FAST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'INPLACE_ADD')
self.assertEquals(ops[11].name, 'STORE_FAST')
self.assertEquals(ops[11].arg, 0)
self.assertEquals(ops[12].name, 'LOAD_FAST')
self.assertEquals(ops[12].arg, 0)
self.assertEquals(ops[13].name, 'LOAD_FAST')
self.assertEquals(ops[13].arg, 0)
self.assertEquals(ops[14].name, 'INPLACE_SUBTRACT')
self.assertEquals(ops[15].name, 'STORE_FAST')
self.assertEquals(ops[15].arg, 0)
self.assertEquals(ops[16].name, 'LOAD_CONST')
self.assertEquals(ops[16].arg, 0)
self.assertEquals(ops[17].name, 'RETURN_VALUE')
def test_list(self):
code = ''.join(chr(c) for c in ([
0x67, 0, 0, # 0 BUILD_LIST, arg=0,
0x7c, 0, 0, # 3 LOAD_FAST, arg=0,
0x44, # 6 GET_ITER,
0x5d, 12, 0, # 7 FOR_ITER, dest=22,
0x7d, 1, 0, # 10 STORE_FAST, arg=1,
0x7c, 1, 0, # 13 LOAD_FAST, arg=1,
0x5e, 2, 0, # 16 LIST_APPEND, arg=2,
0x71, 7, 0, # 19 JUMP_ABSOLUTE, dest=7,
0x01, # 22 POP_TOP,
0x64, 0, 0, # 23 LOAD_CONST, arg=0,
0x53, # 26 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 11)
self.assertEquals(ops[0].name, 'BUILD_LIST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_FAST')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'GET_ITER')
self.assertEquals(ops[3].name, 'FOR_ITER')
self.assertEquals(ops[3].arg, 8)
self.assertEquals(ops[3].target, ops[8])
self.assertEquals(ops[4].name, 'STORE_FAST')
self.assertEquals(ops[4].arg, 1)
self.assertEquals(ops[5].name, 'LOAD_FAST')
self.assertEquals(ops[5].arg, 1)
self.assertEquals(ops[6].name, 'LIST_APPEND')
self.assertEquals(ops[6].arg, 2)
self.assertEquals(ops[7].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[7].arg, 3)
self.assertEquals(ops[7].target, ops[3])
self.assertEquals(ops[8].name, 'POP_TOP')
self.assertEquals(ops[9].name, 'LOAD_CONST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'RETURN_VALUE')
def test_loop(self):
code = ''.join(chr(c) for c in ([
0x78, 10, 0, # 0 SETUP_LOOP, dest=13,
0x74, 0, 0, # 3 LOAD_GLOBAL, arg=0,
0x72, 12, 0, # 6 POP_JUMP_IF_FALSE, dest=12,
0x71, 3, 0, # 9 JUMP_ABSOLUTE, dest=3,
0x57, # 12 POP_BLOCK,
0x64, 0, 0, # 13 LOAD_CONST, arg=0,
0x53, # 16 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 7)
self.assertEquals(ops[0].name, 'SETUP_LOOP')
self.assertEquals(ops[0].arg, 5)
self.assertEquals(ops[0].target, ops[5])
self.assertEquals(ops[1].name, 'LOAD_GLOBAL')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'POP_JUMP_IF_FALSE')
self.assertEquals(ops[2].arg, 4)
self.assertEquals(ops[2].target, ops[4])
self.assertEquals(ops[3].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[3].arg, 1)
self.assertEquals(ops[3].target, ops[1])
self.assertEquals(ops[4].name, 'POP_BLOCK')
self.assertEquals(ops[5].name, 'LOAD_CONST')
self.assertEquals(ops[5].arg, 0)
self.assertEquals(ops[6].name, 'RETURN_VALUE')
def test_raise_one(self):
code = ''.join(chr(c) for c in ([
0x64, 0, 0, # 0 LOAD_CONST, arg=0,
0x82, 1, 0, # 3 RAISE_VARARGS, arg=1,
0x64, 0, 0, # 6 LOAD_CONST, arg=0,
0x53, # 9 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 4)
self.assertEquals(ops[0].name, 'LOAD_CONST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'RAISE_VARARGS')
self.assertEquals(ops[1].arg, 1)
self.assertEquals(ops[2].name, 'LOAD_CONST')
self.assertEquals(ops[2].arg, 0)
self.assertEquals(ops[3].name, 'RETURN_VALUE')
def test_unary(self):
code = ''.join(chr(c) for c in ([
0x7c, 0, 0, # 0 LOAD_FAST, arg=0,
0x0b, # 3 UNARY_NEGATIVE,
0x01, # 4 POP_TOP,
0x7c, 0, 0, # 5 LOAD_FAST, arg=0,
0x0f, # 8 UNARY_INVERT,
0x01, # 9 POP_TOP,
0x7c, 0, 0, # 10 LOAD_FAST, arg=0,
0x0a, # 13 UNARY_POSITIVE,
0x01, # 14 POP_TOP,
0x64, 0, 0, # 15 LOAD_CONST, arg=0,
0x53, # 18 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 11)
self.assertEquals(ops[0].name, 'LOAD_FAST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'UNARY_NEGATIVE')
self.assertEquals(ops[2].name, 'POP_TOP')
self.assertEquals(ops[3].name, 'LOAD_FAST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'UNARY_INVERT')
self.assertEquals(ops[5].name, 'POP_TOP')
self.assertEquals(ops[6].name, 'LOAD_FAST')
self.assertEquals(ops[6].arg, 0)
self.assertEquals(ops[7].name, 'UNARY_POSITIVE')
self.assertEquals(ops[8].name, 'POP_TOP')
self.assertEquals(ops[9].name, 'LOAD_CONST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'RETURN_VALUE')
def test_with(self):
code = ''.join(chr(c) for c in ([
0x64, 0, 0, # 0 LOAD_CONST, arg=0,
0x8f, 5, 0, # 3 SETUP_WITH, dest=11,
0x01, # 6 POP_TOP,
0x57, # 7 POP_BLOCK,
0x64, 0, 0, # 8 LOAD_CONST, arg=0,
0x51, # 11 WITH_CLEANUP,
0x58, # 12 END_FINALLY,
0x64, 0, 0, # 13 LOAD_CONST, arg=0,
0x53, # 16 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 9)
self.assertEquals(ops[0].name, 'LOAD_CONST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'SETUP_WITH')
self.assertEquals(ops[1].arg, 5)
self.assertEquals(ops[1].target, ops[5])
self.assertEquals(ops[2].name, 'POP_TOP')
self.assertEquals(ops[3].name, 'POP_BLOCK')
self.assertEquals(ops[4].name, 'LOAD_CONST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'WITH_CLEANUP')
self.assertEquals(ops[6].name, 'END_FINALLY')
self.assertEquals(ops[7].name, 'LOAD_CONST')
self.assertEquals(ops[7].arg, 0)
self.assertEquals(ops[8].name, 'RETURN_VALUE')
class Python3Test(unittest.TestCase):
"""Test bytecodes.dis for Python 3 opcodes."""
PYTHON_VERSION = (3, 3, 0)
def dis(self, data):
return opcodes.dis(data, self.PYTHON_VERSION)
def test_pop_top(self):
self.assertEquals(self.dis('\x01')[0].name, 'POP_TOP')
def test_rot_two(self):
self.assertEquals(self.dis('\x02')[0].name, 'ROT_TWO')
def test_rot_three(self):
self.assertEquals(self.dis('\x03')[0].name, 'ROT_THREE')
def test_dup_top(self):
self.assertEquals(self.dis('\x04')[0].name, 'DUP_TOP')
def test_dup_top_two(self):
self.assertEquals(self.dis('\x05')[0].name, 'DUP_TOP_TWO')
def test_nop(self):
self.assertEquals(self.dis('\t')[0].name, 'NOP')
def test_unary_positive(self):
self.assertEquals(self.dis('\n')[0].name, 'UNARY_POSITIVE')
def test_unary_negative(self):
self.assertEquals(self.dis('\x0b')[0].name, 'UNARY_NEGATIVE')
def test_unary_not(self):
self.assertEquals(self.dis('\x0c')[0].name, 'UNARY_NOT')
def test_unary_invert(self):
self.assertEquals(self.dis('\x0f')[0].name, 'UNARY_INVERT')
def test_binary_power(self):
self.assertEquals(self.dis('\x13')[0].name, 'BINARY_POWER')
def test_binary_multiply(self):
self.assertEquals(self.dis('\x14')[0].name, 'BINARY_MULTIPLY')
def test_binary_modulo(self):
self.assertEquals(self.dis('\x16')[0].name, 'BINARY_MODULO')
def test_binary_add(self):
self.assertEquals(self.dis('\x17')[0].name, 'BINARY_ADD')
def test_binary_subtract(self):
self.assertEquals(self.dis('\x18')[0].name, 'BINARY_SUBTRACT')
def test_binary_subscr(self):
self.assertEquals(self.dis('\x19')[0].name, 'BINARY_SUBSCR')
def test_binary_floor_divide(self):
self.assertEquals(self.dis('\x1a')[0].name, 'BINARY_FLOOR_DIVIDE')
def test_binary_true_divide(self):
self.assertEquals(self.dis('\x1b')[0].name, 'BINARY_TRUE_DIVIDE')
def test_inplace_floor_divide(self):
self.assertEquals(self.dis('\x1c')[0].name, 'INPLACE_FLOOR_DIVIDE')
def test_inplace_true_divide(self):
self.assertEquals(self.dis('\x1d')[0].name, 'INPLACE_TRUE_DIVIDE')
def test_store_map(self):
self.assertEquals(self.dis('6')[0].name, 'STORE_MAP')
def test_inplace_add(self):
self.assertEquals(self.dis('7')[0].name, 'INPLACE_ADD')
def test_inplace_subtract(self):
self.assertEquals(self.dis('8')[0].name, 'INPLACE_SUBTRACT')
def test_inplace_multiply(self):
self.assertEquals(self.dis('9')[0].name, 'INPLACE_MULTIPLY')
def test_inplace_modulo(self):
self.assertEquals(self.dis(';')[0].name, 'INPLACE_MODULO')
def test_store_subscr(self):
self.assertEquals(self.dis('<')[0].name, 'STORE_SUBSCR')
def test_delete_subscr(self):
self.assertEquals(self.dis('=')[0].name, 'DELETE_SUBSCR')
def test_binary_lshift(self):
self.assertEquals(self.dis('>')[0].name, 'BINARY_LSHIFT')
def test_binary_rshift(self):
self.assertEquals(self.dis('?')[0].name, 'BINARY_RSHIFT')
def test_binary_and(self):
self.assertEquals(self.dis('@')[0].name, 'BINARY_AND')
def test_binary_xor(self):
self.assertEquals(self.dis('A')[0].name, 'BINARY_XOR')
def test_binary_or(self):
self.assertEquals(self.dis('B')[0].name, 'BINARY_OR')
def test_inplace_power(self):
self.assertEquals(self.dis('C')[0].name, 'INPLACE_POWER')
def test_get_iter(self):
self.assertEquals(self.dis('D')[0].name, 'GET_ITER')
def test_print_expr(self):
self.assertEquals(self.dis('F')[0].name, 'PRINT_EXPR')
def test_load_build_class(self):
self.assertEquals(self.dis('G')[0].name, 'LOAD_BUILD_CLASS')
def test_yield_from(self):
self.assertEquals(self.dis('H')[0].name, 'YIELD_FROM')
def test_inplace_lshift(self):
self.assertEquals(self.dis('K')[0].name, 'INPLACE_LSHIFT')
def test_inplace_rshift(self):
self.assertEquals(self.dis('L')[0].name, 'INPLACE_RSHIFT')
def test_inplace_and(self):
self.assertEquals(self.dis('M')[0].name, 'INPLACE_AND')
def test_inplace_xor(self):
self.assertEquals(self.dis('N')[0].name, 'INPLACE_XOR')
def test_inplace_or(self):
self.assertEquals(self.dis('O')[0].name, 'INPLACE_OR')
def test_break_loop(self):
self.assertEquals(self.dis('P')[0].name, 'BREAK_LOOP')
def test_with_cleanup(self):
self.assertEquals(self.dis('Q')[0].name, 'WITH_CLEANUP')
def test_return_value(self):
self.assertEquals(self.dis('S')[0].name, 'RETURN_VALUE')
def test_import_star(self):
self.assertEquals(self.dis('T')[0].name, 'IMPORT_STAR')
def test_yield_value(self):
self.assertEquals(self.dis('V')[0].name, 'YIELD_VALUE')
def test_pop_block(self):
self.assertEquals(self.dis('W')[0].name, 'POP_BLOCK')
def test_end_finally(self):
self.assertEquals(self.dis('X')[0].name, 'END_FINALLY')
def test_pop_except(self):
self.assertEquals(self.dis('Y')[0].name, 'POP_EXCEPT')
def test_store_name(self):
self.assertEquals(self.dis('Z\x00\x00')[0].name, 'STORE_NAME')
def test_delete_name(self):
self.assertEquals(self.dis('[\x00\x00')[0].name, 'DELETE_NAME')
def test_unpack_sequence(self):
self.assertEquals(self.dis('\\\x00\x00')[0].name, 'UNPACK_SEQUENCE')
def test_for_iter(self):
self.assertEquals(self.dis(']\x00\x00\t')[0].name, 'FOR_ITER')
def test_unpack_ex(self):
self.assertEquals(self.dis('^\x00\x00')[0].name, 'UNPACK_EX')
def test_store_attr(self):
self.assertEquals(self.dis('_\x00\x00')[0].name, 'STORE_ATTR')
def test_delete_attr(self):
self.assertEquals(self.dis('`\x00\x00')[0].name, 'DELETE_ATTR')
def test_store_global(self):
self.assertEquals(self.dis('a\x00\x00')[0].name, 'STORE_GLOBAL')
def test_delete_global(self):
self.assertEquals(self.dis('b\x00\x00')[0].name, 'DELETE_GLOBAL')
def test_load_const(self):
self.assertEquals(self.dis('d\x00\x00')[0].name, 'LOAD_CONST')
def test_load_name(self):
self.assertEquals(self.dis('e\x00\x00')[0].name, 'LOAD_NAME')
def test_build_tuple(self):
self.assertEquals(self.dis('f\x00\x00')[0].name, 'BUILD_TUPLE')
def test_build_list(self):
self.assertEquals(self.dis('g\x00\x00')[0].name, 'BUILD_LIST')
def test_build_set(self):
self.assertEquals(self.dis('h\x00\x00')[0].name, 'BUILD_SET')
def test_build_map(self):
self.assertEquals(self.dis('i\x00\x00')[0].name, 'BUILD_MAP')
def test_load_attr(self):
self.assertEquals(self.dis('j\x00\x00')[0].name, 'LOAD_ATTR')
def test_compare_op(self):
self.assertEquals(self.dis('k\x00\x00')[0].name, 'COMPARE_OP')
def test_import_name(self):
self.assertEquals(self.dis('l\x00\x00')[0].name, 'IMPORT_NAME')
def test_import_from(self):
self.assertEquals(self.dis('m\x00\x00')[0].name, 'IMPORT_FROM')
def test_jump_forward(self):
self.assertEquals(self.dis('n\x00\x00\t')[0].name, 'JUMP_FORWARD')
def test_jump_if_false_or_pop(self):
self.assertEquals(self.dis('o\x03\x00\t')[0].name, 'JUMP_IF_FALSE_OR_POP')
def test_jump_if_true_or_pop(self):
self.assertEquals(self.dis('p\x03\x00\t')[0].name, 'JUMP_IF_TRUE_OR_POP')
def test_jump_absolute(self):
self.assertEquals(self.dis('q\x03\x00\t')[0].name, 'JUMP_ABSOLUTE')
def test_pop_jump_if_false(self):
self.assertEquals(self.dis('r\x03\x00\t')[0].name, 'POP_JUMP_IF_FALSE')
def test_pop_jump_if_true(self):
self.assertEquals(self.dis('s\x03\x00\t')[0].name, 'POP_JUMP_IF_TRUE')
def test_load_global(self):
self.assertEquals(self.dis('t\x00\x00')[0].name, 'LOAD_GLOBAL')
def test_continue_loop(self):
self.assertEquals(self.dis('w\x03\x00\t')[0].name, 'CONTINUE_LOOP')
def test_setup_loop(self):
self.assertEquals(self.dis('x\x00\x00\t')[0].name, 'SETUP_LOOP')
def test_setup_except(self):
self.assertEquals(self.dis('y\x00\x00\t')[0].name, 'SETUP_EXCEPT')
def test_setup_finally(self):
self.assertEquals(self.dis('z\x00\x00\t')[0].name, 'SETUP_FINALLY')
def test_load_fast(self):
self.assertEquals(self.dis('|\x00\x00')[0].name, 'LOAD_FAST')
def test_store_fast(self):
self.assertEquals(self.dis('}\x00\x00')[0].name, 'STORE_FAST')
def test_delete_fast(self):
self.assertEquals(self.dis('~\x00\x00')[0].name, 'DELETE_FAST')
def test_raise_varargs(self):
self.assertEquals(self.dis('\x82\x00\x00')[0].name, 'RAISE_VARARGS')
def test_call_function(self):
self.assertEquals(self.dis('\x83\x00\x00')[0].name, 'CALL_FUNCTION')
def test_make_function(self):
self.assertEquals(self.dis('\x84\x00\x00')[0].name, 'MAKE_FUNCTION')
def test_build_slice(self):
self.assertEquals(self.dis('\x85\x00\x00')[0].name, 'BUILD_SLICE')
def test_make_closure(self):
self.assertEquals(self.dis('\x86\x00\x00')[0].name, 'MAKE_CLOSURE')
def test_load_closure(self):
self.assertEquals(self.dis('\x87\x00\x00')[0].name, 'LOAD_CLOSURE')
def test_load_deref(self):
self.assertEquals(self.dis('\x88\x00\x00')[0].name, 'LOAD_DEREF')
def test_store_deref(self):
self.assertEquals(self.dis('\x89\x00\x00')[0].name, 'STORE_DEREF')
def test_delete_deref(self):
self.assertEquals(self.dis('\x8a\x00\x00')[0].name, 'DELETE_DEREF')
def test_call_function_var(self):
self.assertEquals(self.dis('\x8c\x00\x00')[0].name, 'CALL_FUNCTION_VAR')
def test_call_function_kw(self):
self.assertEquals(self.dis('\x8d\x00\x00')[0].name, 'CALL_FUNCTION_KW')
def test_call_function_var_kw(self):
self.assertEquals(self.dis('\x8e\x00\x00')[0].name, 'CALL_FUNCTION_VAR_KW')
def test_setup_with(self):
self.assertEquals(self.dis('\x8f\x00\x00\t')[0].name, 'SETUP_WITH')
def test_list_append(self):
self.assertEquals(self.dis('\x91\x00\x00')[0].name, 'LIST_APPEND')
def test_set_add(self):
self.assertEquals(self.dis('\x92\x00\x00')[0].name, 'SET_ADD')
def test_map_add(self):
self.assertEquals(self.dis('\x93\x00\x00')[0].name, 'MAP_ADD')
def test_load_classderef(self):
self.assertEquals(self.dis('\x94\x00\x00')[0].name, 'LOAD_CLASSDEREF')
def test_binary(self):
code = ''.join(chr(c) for c in ([
0x7c, 0, 0, # 0 LOAD_FAST, arg=0,
0x7c, 0, 0, # 3 LOAD_FAST, arg=0,
0x17, # 6 BINARY_ADD,
0x01, # 7 POP_TOP,
0x7c, 0, 0, # 8 LOAD_FAST, arg=0,
0x7c, 0, 0, # 11 LOAD_FAST, arg=0,
0x14, # 14 BINARY_MULTIPLY,
0x01, # 15 POP_TOP,
0x7c, 0, 0, # 16 LOAD_FAST, arg=0,
0x7c, 0, 0, # 19 LOAD_FAST, arg=0,
0x16, # 22 BINARY_MODULO,
0x01, # 23 POP_TOP,
0x7c, 0, 0, # 24 LOAD_FAST, arg=0,
0x7c, 0, 0, # 27 LOAD_FAST, arg=0,
0x1b, # 30 BINARY_TRUE_DIVIDE,
0x01, # 31 POP_TOP,
0x64, 0, 0, # 32 LOAD_CONST, arg=0,
0x53, # 35 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 18)
self.assertEquals(ops[0].name, 'LOAD_FAST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_FAST')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'BINARY_ADD')
self.assertEquals(ops[3].name, 'POP_TOP')
self.assertEquals(ops[4].name, 'LOAD_FAST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'LOAD_FAST')
self.assertEquals(ops[5].arg, 0)
self.assertEquals(ops[6].name, 'BINARY_MULTIPLY')
self.assertEquals(ops[7].name, 'POP_TOP')
self.assertEquals(ops[8].name, 'LOAD_FAST')
self.assertEquals(ops[8].arg, 0)
self.assertEquals(ops[9].name, 'LOAD_FAST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'BINARY_MODULO')
self.assertEquals(ops[11].name, 'POP_TOP')
self.assertEquals(ops[12].name, 'LOAD_FAST')
self.assertEquals(ops[12].arg, 0)
self.assertEquals(ops[13].name, 'LOAD_FAST')
self.assertEquals(ops[13].arg, 0)
self.assertEquals(ops[14].name, 'BINARY_TRUE_DIVIDE')
self.assertEquals(ops[15].name, 'POP_TOP')
self.assertEquals(ops[16].name, 'LOAD_CONST')
self.assertEquals(ops[16].arg, 0)
self.assertEquals(ops[17].name, 'RETURN_VALUE')
def test_break(self):
code = ''.join(chr(c) for c in ([
0x78, 4, 0, # 0 SETUP_LOOP, dest=7,
0x50, # 3 BREAK_LOOP,
0x71, 3, 0, # 4 JUMP_ABSOLUTE, dest=3,
0x64, 0, 0, # 7 LOAD_CONST, arg=0,
0x53, # 10 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 5)
self.assertEquals(ops[0].name, 'SETUP_LOOP')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'BREAK_LOOP')
self.assertEquals(ops[2].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[2].arg, 1)
self.assertEquals(ops[2].target, ops[1])
self.assertEquals(ops[3].name, 'LOAD_CONST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'RETURN_VALUE')
def test_call(self):
code = ''.join(chr(c) for c in ([
0x74, 0, 0, # 0 LOAD_GLOBAL, arg=0,
0x83, 0, 0, # 3 CALL_FUNCTION, arg=0,
0x01, # 6 POP_TOP,
0x64, 0, 0, # 7 LOAD_CONST, arg=0,
0x53, # 10 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 5)
self.assertEquals(ops[0].name, 'LOAD_GLOBAL')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'CALL_FUNCTION')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'POP_TOP')
self.assertEquals(ops[3].name, 'LOAD_CONST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'RETURN_VALUE')
def test_continue(self):
code = ''.join(chr(c) for c in ([
0x78, 6, 0, # 0 SETUP_LOOP, dest=9,
0x71, 3, 0, # 3 JUMP_ABSOLUTE, dest=3,
0x71, 3, 0, # 6 JUMP_ABSOLUTE, dest=3,
0x64, 0, 0, # 9 LOAD_CONST, arg=0,
0x53, # 12 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 5)
self.assertEquals(ops[0].name, 'SETUP_LOOP')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[1].arg, 1)
self.assertEquals(ops[1].target, ops[1])
self.assertEquals(ops[2].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[2].arg, 1)
self.assertEquals(ops[2].target, ops[1])
self.assertEquals(ops[3].name, 'LOAD_CONST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'RETURN_VALUE')
def test_except(self):
code = ''.join(chr(c) for c in ([
0x79, 4, 0, # 0 SETUP_EXCEPT, dest=7,
0x57, # 3 POP_BLOCK,
0x6e, 8, 0, # 4 JUMP_FORWARD, dest=15,
0x01, # 7 POP_TOP,
0x01, # 8 POP_TOP,
0x01, # 9 POP_TOP,
0x59, # 10 POP_EXCEPT,
0x6e, 1, 0, # 11 JUMP_FORWARD, dest=15,
0x58, # 14 END_FINALLY,
0x64, 0, 0, # 15 LOAD_CONST, arg=0,
0x53, # 18 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 11)
self.assertEquals(ops[0].name, 'SETUP_EXCEPT')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'POP_BLOCK')
self.assertEquals(ops[2].name, 'JUMP_FORWARD')
self.assertEquals(ops[2].arg, 9)
self.assertEquals(ops[2].target, ops[9])
self.assertEquals(ops[3].name, 'POP_TOP')
self.assertEquals(ops[4].name, 'POP_TOP')
self.assertEquals(ops[5].name, 'POP_TOP')
self.assertEquals(ops[6].name, 'POP_EXCEPT')
self.assertEquals(ops[7].name, 'JUMP_FORWARD')
self.assertEquals(ops[7].arg, 9)
self.assertEquals(ops[7].target, ops[9])
self.assertEquals(ops[8].name, 'END_FINALLY')
self.assertEquals(ops[9].name, 'LOAD_CONST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'RETURN_VALUE')
def test_finally(self):
code = ''.join(chr(c) for c in ([
0x7a, 4, 0, # 0 SETUP_FINALLY, dest=7,
0x57, # 3 POP_BLOCK,
0x64, 0, 0, # 4 LOAD_CONST, arg=0,
0x58, # 7 END_FINALLY,
0x64, 0, 0, # 8 LOAD_CONST, arg=0,
0x53, # 11 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 6)
self.assertEquals(ops[0].name, 'SETUP_FINALLY')
self.assertEquals(ops[0].arg, 3)
self.assertEquals(ops[0].target, ops[3])
self.assertEquals(ops[1].name, 'POP_BLOCK')
self.assertEquals(ops[2].name, 'LOAD_CONST')
self.assertEquals(ops[2].arg, 0)
self.assertEquals(ops[3].name, 'END_FINALLY')
self.assertEquals(ops[4].name, 'LOAD_CONST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'RETURN_VALUE')
def test_inplace(self):
code = ''.join(chr(c) for c in ([
0x7c, 0, 0, # 0 LOAD_FAST, arg=0,
0x7c, 0, 0, # 3 LOAD_FAST, arg=0,
0x4b, # 6 INPLACE_LSHIFT,
0x7d, 0, 0, # 7 STORE_FAST, arg=0,
0x7c, 0, 0, # 10 LOAD_FAST, arg=0,
0x7c, 0, 0, # 13 LOAD_FAST, arg=0,
0x4c, # 16 INPLACE_RSHIFT,
0x7d, 0, 0, # 17 STORE_FAST, arg=0,
0x7c, 0, 0, # 20 LOAD_FAST, arg=0,
0x7c, 0, 0, # 23 LOAD_FAST, arg=0,
0x37, # 26 INPLACE_ADD,
0x7d, 0, 0, # 27 STORE_FAST, arg=0,
0x7c, 0, 0, # 30 LOAD_FAST, arg=0,
0x7c, 0, 0, # 33 LOAD_FAST, arg=0,
0x38, # 36 INPLACE_SUBTRACT,
0x7d, 0, 0, # 37 STORE_FAST, arg=0,
0x64, 0, 0, # 40 LOAD_CONST, arg=0,
0x53, # 43 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 18)
self.assertEquals(ops[0].name, 'LOAD_FAST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_FAST')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'INPLACE_LSHIFT')
self.assertEquals(ops[3].name, 'STORE_FAST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'LOAD_FAST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'LOAD_FAST')
self.assertEquals(ops[5].arg, 0)
self.assertEquals(ops[6].name, 'INPLACE_RSHIFT')
self.assertEquals(ops[7].name, 'STORE_FAST')
self.assertEquals(ops[7].arg, 0)
self.assertEquals(ops[8].name, 'LOAD_FAST')
self.assertEquals(ops[8].arg, 0)
self.assertEquals(ops[9].name, 'LOAD_FAST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'INPLACE_ADD')
self.assertEquals(ops[11].name, 'STORE_FAST')
self.assertEquals(ops[11].arg, 0)
self.assertEquals(ops[12].name, 'LOAD_FAST')
self.assertEquals(ops[12].arg, 0)
self.assertEquals(ops[13].name, 'LOAD_FAST')
self.assertEquals(ops[13].arg, 0)
self.assertEquals(ops[14].name, 'INPLACE_SUBTRACT')
self.assertEquals(ops[15].name, 'STORE_FAST')
self.assertEquals(ops[15].arg, 0)
self.assertEquals(ops[16].name, 'LOAD_CONST')
self.assertEquals(ops[16].arg, 0)
self.assertEquals(ops[17].name, 'RETURN_VALUE')
def test_list(self):
code = ''.join(chr(c) for c in ([
0x64, 1, 0, # 0 LOAD_CONST, arg=1,
0x64, 2, 0, # 3 LOAD_CONST, arg=2,
0x84, 0, 0, # 6 MAKE_FUNCTION, arg=0,
0x7c, 0, 0, # 9 LOAD_FAST, arg=0,
0x44, # 12 GET_ITER,
0x83, 1, 0, # 13 CALL_FUNCTION, arg=1,
0x01, # 16 POP_TOP,
0x64, 0, 0, # 17 LOAD_CONST, arg=0,
0x53, # 20 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 9)
self.assertEquals(ops[0].name, 'LOAD_CONST')
self.assertEquals(ops[0].arg, 1)
self.assertEquals(ops[1].name, 'LOAD_CONST')
self.assertEquals(ops[1].arg, 2)
self.assertEquals(ops[2].name, 'MAKE_FUNCTION')
self.assertEquals(ops[2].arg, 0)
self.assertEquals(ops[3].name, 'LOAD_FAST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'GET_ITER')
self.assertEquals(ops[5].name, 'CALL_FUNCTION')
self.assertEquals(ops[5].arg, 1)
self.assertEquals(ops[6].name, 'POP_TOP')
self.assertEquals(ops[7].name, 'LOAD_CONST')
self.assertEquals(ops[7].arg, 0)
self.assertEquals(ops[8].name, 'RETURN_VALUE')
def test_loop(self):
code = ''.join(chr(c) for c in ([
0x78, 3, 0, # 0 SETUP_LOOP, dest=6,
0x71, 3, 0, # 3 JUMP_ABSOLUTE, dest=3,
0x64, 0, 0, # 6 LOAD_CONST, arg=0,
0x53, # 9 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 4)
self.assertEquals(ops[0].name, 'SETUP_LOOP')
self.assertEquals(ops[0].arg, 2)
self.assertEquals(ops[0].target, ops[2])
self.assertEquals(ops[1].name, 'JUMP_ABSOLUTE')
self.assertEquals(ops[1].arg, 1)
self.assertEquals(ops[1].target, ops[1])
self.assertEquals(ops[2].name, 'LOAD_CONST')
self.assertEquals(ops[2].arg, 0)
self.assertEquals(ops[3].name, 'RETURN_VALUE')
def test_raise_zero(self):
code = ''.join(chr(c) for c in ([
0x82, 0, 0, # 0 RAISE_VARARGS, arg=0,
0x64, 0, 0, # 3 LOAD_CONST, arg=0,
0x53, # 6 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 3)
self.assertEquals(ops[0].name, 'RAISE_VARARGS')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_CONST')
self.assertEquals(ops[1].arg, 0)
self.assertEquals(ops[2].name, 'RETURN_VALUE')
def test_raise_one(self):
code = ''.join(chr(c) for c in ([
0x64, 0, 0, # 0 LOAD_CONST, arg=0,
0x82, 1, 0, # 3 RAISE_VARARGS, arg=1,
0x64, 0, 0, # 6 LOAD_CONST, arg=0,
0x53, # 9 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 4)
self.assertEquals(ops[0].name, 'LOAD_CONST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'RAISE_VARARGS')
self.assertEquals(ops[1].arg, 1)
self.assertEquals(ops[2].name, 'LOAD_CONST')
self.assertEquals(ops[2].arg, 0)
self.assertEquals(ops[3].name, 'RETURN_VALUE')
def test_raise_two(self):
code = ''.join(chr(c) for c in ([
0x74, 0, 0, # 0 LOAD_GLOBAL, arg=0,
0x74, 1, 0, # 3 LOAD_GLOBAL, arg=1,
0x82, 2, 0, # 6 RAISE_VARARGS, arg=2,
0x64, 0, 0, # 9 LOAD_CONST, arg=0,
0x53, # 12 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 5)
self.assertEquals(ops[0].name, 'LOAD_GLOBAL')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_GLOBAL')
self.assertEquals(ops[1].arg, 1)
self.assertEquals(ops[2].name, 'RAISE_VARARGS')
self.assertEquals(ops[2].arg, 2)
self.assertEquals(ops[3].name, 'LOAD_CONST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'RETURN_VALUE')
def test_raise_three(self):
code = ''.join(chr(c) for c in ([
0x74, 0, 0, # 0 LOAD_GLOBAL, arg=0,
0x74, 1, 0, # 3 LOAD_GLOBAL, arg=1,
0x64, 1, 0, # 6 LOAD_CONST, arg=1,
0x82, 3, 0, # 9 RAISE_VARARGS, arg=3,
0x64, 0, 0, # 12 LOAD_CONST, arg=0,
0x53, # 15 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 6)
self.assertEquals(ops[0].name, 'LOAD_GLOBAL')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'LOAD_GLOBAL')
self.assertEquals(ops[1].arg, 1)
self.assertEquals(ops[2].name, 'LOAD_CONST')
self.assertEquals(ops[2].arg, 1)
self.assertEquals(ops[3].name, 'RAISE_VARARGS')
self.assertEquals(ops[3].arg, 3)
self.assertEquals(ops[4].name, 'LOAD_CONST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'RETURN_VALUE')
def test_unary(self):
code = ''.join(chr(c) for c in ([
0x7c, 0, 0, # 0 LOAD_FAST, arg=0,
0x0b, # 3 UNARY_NEGATIVE,
0x01, # 4 POP_TOP,
0x7c, 0, 0, # 5 LOAD_FAST, arg=0,
0x0f, # 8 UNARY_INVERT,
0x01, # 9 POP_TOP,
0x7c, 0, 0, # 10 LOAD_FAST, arg=0,
0x0a, # 13 UNARY_POSITIVE,
0x01, # 14 POP_TOP,
0x64, 0, 0, # 15 LOAD_CONST, arg=0,
0x53, # 18 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 11)
self.assertEquals(ops[0].name, 'LOAD_FAST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'UNARY_NEGATIVE')
self.assertEquals(ops[2].name, 'POP_TOP')
self.assertEquals(ops[3].name, 'LOAD_FAST')
self.assertEquals(ops[3].arg, 0)
self.assertEquals(ops[4].name, 'UNARY_INVERT')
self.assertEquals(ops[5].name, 'POP_TOP')
self.assertEquals(ops[6].name, 'LOAD_FAST')
self.assertEquals(ops[6].arg, 0)
self.assertEquals(ops[7].name, 'UNARY_POSITIVE')
self.assertEquals(ops[8].name, 'POP_TOP')
self.assertEquals(ops[9].name, 'LOAD_CONST')
self.assertEquals(ops[9].arg, 0)
self.assertEquals(ops[10].name, 'RETURN_VALUE')
def test_with(self):
code = ''.join(chr(c) for c in ([
0x64, 0, 0, # 0 LOAD_CONST, arg=0,
0x8f, 5, 0, # 3 SETUP_WITH, dest=11,
0x01, # 6 POP_TOP,
0x57, # 7 POP_BLOCK,
0x64, 0, 0, # 8 LOAD_CONST, arg=0,
0x51, # 11 WITH_CLEANUP,
0x58, # 12 END_FINALLY,
0x64, 0, 0, # 13 LOAD_CONST, arg=0,
0x53, # 16 RETURN_VALUE
]))
ops = opcodes.dis(code, self.PYTHON_VERSION)
self.assertEquals(len(ops), 9)
self.assertEquals(ops[0].name, 'LOAD_CONST')
self.assertEquals(ops[0].arg, 0)
self.assertEquals(ops[1].name, 'SETUP_WITH')
self.assertEquals(ops[1].arg, 5)
self.assertEquals(ops[1].target, ops[5])
self.assertEquals(ops[2].name, 'POP_TOP')
self.assertEquals(ops[3].name, 'POP_BLOCK')
self.assertEquals(ops[4].name, 'LOAD_CONST')
self.assertEquals(ops[4].arg, 0)
self.assertEquals(ops[5].name, 'WITH_CLEANUP')
self.assertEquals(ops[6].name, 'END_FINALLY')
self.assertEquals(ops[7].name, 'LOAD_CONST')
self.assertEquals(ops[7].arg, 0)
self.assertEquals(ops[8].name, 'RETURN_VALUE')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,192,197,559,645,915,400 | 35.034364 | 79 | 0.625424 | false |
ikargis/horizon_fod | openstack_dashboard/api/glance.py | 1 | 3362 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import itertools
import logging
import thread
import urlparse
from django.conf import settings # noqa
import glanceclient as glance_client
from horizon.utils import functions as utils
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
def glanceclient(request):
o = urlparse.urlparse(base.url_for(request, 'image'))
url = "://".join((o.scheme, o.netloc))
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug('glanceclient connection created using token "%s" and url "%s"'
% (request.user.token.id, url))
return glance_client.Client('1', url, token=request.user.token.id,
insecure=insecure, cacert=cacert)
def image_delete(request, image_id):
return glanceclient(request).images.delete(image_id)
def image_get(request, image_id):
"""Returns an Image object populated with metadata for image
with supplied identifier.
"""
image = glanceclient(request).images.get(image_id)
if not hasattr(image, 'name'):
image.name = None
return image
def image_list_detailed(request, marker=None, filters=None, paginate=False):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
page_size = utils.get_page_size(request)
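    # When paginating, one extra record beyond the page size is requested; if
    # that extra record comes back we know another page exists, and it is
    # popped off the results further down.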
if paginate:
request_size = page_size + 1
else:
request_size = limit
kwargs = {'filters': filters or {}}
if marker:
kwargs['marker'] = marker
images_iter = glanceclient(request).images.list(page_size=request_size,
limit=limit,
**kwargs)
has_more_data = False
if paginate:
images = list(itertools.islice(images_iter, request_size))
if len(images) > page_size:
images.pop(-1)
has_more_data = True
else:
images = list(images_iter)
return (images, has_more_data)
def image_update(request, image_id, **kwargs):
return glanceclient(request).images.update(image_id, **kwargs)
def image_create(request, **kwargs):
copy_from = None
if kwargs.get('copy_from'):
copy_from = kwargs.pop('copy_from')
image = glanceclient(request).images.create(**kwargs)
if copy_from:
thread.start_new_thread(image_update,
(request, image.id),
{'copy_from': copy_from})
return image
| apache-2.0 | 2,677,192,707,964,886,500 | 29.844037 | 78 | 0.649018 | false |
alex/changes | changes/api/project_test_history.py | 1 | 2669 | from __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from sqlalchemy.orm import contains_eager, joinedload
from changes.api.base import APIView
from changes.constants import Status
from changes.models import Project, TestCase, Job, Source
class ProjectTestHistoryAPIView(APIView):
get_parser = reqparse.RequestParser()
get_parser.add_argument('per_page', type=int, location='args',
default=100)
def get(self, project_id, test_hash):
project = Project.get(project_id)
if not project:
return '', 404
# use the most recent test run to find basic details
test = TestCase.query.filter(
TestCase.project_id == project_id,
TestCase.name_sha == test_hash,
).order_by(TestCase.date_created.desc()).limit(1).first()
if not test:
return '', 404
args = self.get_parser.parse_args()
num_results = args.per_page
# restrict the join to the last N jobs otherwise this can get
# significantly expensive as we have to seek quite a ways
job_sq = Job.query.filter(
Job.status == Status.finished,
Job.project_id == project_id,
).order_by(Job.date_created.desc()).limit(num_results * 10).subquery()
recent_runs = list(TestCase.query.options(
contains_eager('job', alias=job_sq),
contains_eager('job.source'),
joinedload('job.build'),
joinedload('job.build.author'),
joinedload('job.build.source'),
joinedload('job.build.source.revision'),
).join(
job_sq, TestCase.job_id == job_sq.c.id,
).join(
Source, job_sq.c.source_id == Source.id,
).filter(
Source.repository_id == project.repository_id,
Source.patch_id == None, # NOQA
Source.revision_sha != None, # NOQA
TestCase.name_sha == test.name_sha,
).order_by(job_sq.c.date_created.desc())[:num_results])
jobs = set(r.job for r in recent_runs)
builds = set(j.build for j in jobs)
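        # Serialize each distinct job and build only once, then look the
        # serialized forms up by object when assembling the per-run results.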
serialized_jobs = dict(zip(jobs, self.serialize(jobs)))
serialized_builds = dict(zip(builds, self.serialize(builds)))
results = []
for recent_run, s_recent_run in zip(recent_runs, self.serialize(recent_runs)):
s_recent_run['job'] = serialized_jobs[recent_run.job]
s_recent_run['job']['build'] = serialized_builds[recent_run.job.build]
results.append(s_recent_run)
return self.respond(results, serialize=False)
| apache-2.0 | 2,706,287,891,616,317,400 | 37.128571 | 86 | 0.606969 | false |
masfaraud/volmdlr | scripts/distance/tore_tore.py | 1 | 4675 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 14:15:50 2020
@author: Mack Pro
"""
import numpy as npy
import volmdlr as volmdlr
import volmdlr.primitives3D as primitives3D
import volmdlr.primitives2D as primitives2D
import matplotlib.pyplot as plt
import random
import math
rmin, rmax = 100, 1000
posmin, posmax = -100, 100
x1, y1, z1 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
x2, y2, z2 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
R1, R2 = random.randrange(rmin, rmax, 1)/1000, random.randrange(rmin, rmax, 1)/1000 #Radius of the generative arc3D
r1, r2 = random.randrange(rmin/10, rmax/10, 1)/1000, random.randrange(rmin/10, rmax/10, 1)/1000 #Radius of the arc3d generated
c1, c2 = volmdlr.Point3D([x1,y1,z1]), volmdlr.Point3D([x2,y2,z2]) #Choose the coordinate of the center
x3, y3, z3 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
x4, y4, z4 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
n1, n2 = volmdlr.Vector3D([x3,y3,z3]), volmdlr.Vector3D([x4,y4,z4]) #Choose the normal
n1.Normalize() #Normalize the normal in case it is not already a unit vector
n2.Normalize()
plane1, plane2 = volmdlr.Plane3D.from_normal(c1, n1), volmdlr.Plane3D.from_normal(c2, n2) #Create a plane to give us two other vectors
frame1 = volmdlr.Frame3D(c1, plane1.vectors[0], plane1.vectors[1], n1) #Frame in the center of the Tore
frame2 = volmdlr.Frame3D(c2, plane2.vectors[0], plane2.vectors[1], n2)
toresurface1 = volmdlr.ToroidalSurface3D(frame1, R1, r1)
toresurface2 = volmdlr.ToroidalSurface3D(frame2, R2, r2)
angle_min, angle_max = 0, 2*3.14*100
theta1 = random.randrange(angle_min, angle_max, 20)/100 #Tore's length
phi1 = 2*math.pi #angle of circle
offset_theta1 = random.randrange(angle_min, angle_max, 20)/100 #Theta's offset if you want to turn it with normal's reference
offset_phi1 = random.randrange(angle_min, angle_max, 20)/100 #Idem but with circle's normal
print('param1', phi1, theta1, offset_phi1, offset_theta1)
#You have to create a cutting pattern in 2D
pt1, pt2, pt3, pt4 = volmdlr.Point2D((offset_theta1, offset_phi1)), volmdlr.Point2D((offset_theta1, offset_phi1+phi1)), volmdlr.Point2D((offset_theta1+theta1, offset_phi1+phi1)), volmdlr.Point2D((offset_theta1+theta1, offset_phi1))
seg1, seg2, seg3, seg4 = volmdlr.LineSegment2D(pt1, pt2), volmdlr.LineSegment2D(pt2, pt3), volmdlr.LineSegment2D(pt3, pt4), volmdlr.LineSegment2D(pt4, pt1)
edges = [seg1, seg2, seg3, seg4]
contours2d = [volmdlr.Contour2D(edges)]
points = [theta1, phi1]
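# The rectangle built above lives in the (theta, phi) parameter space of the
# torus: it is the 2D "cutting pattern" that delimits which part of the
# surface the ToroidalFace3D will cover.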
theta2 = random.randrange(angle_min, angle_max, 20)/100 #Tore's length
phi2 = random.randrange(angle_min, angle_max, 20)/100 #angle of circle
offset_theta2 = random.randrange(angle_min, angle_max, 20)/100 #Theta's offset if you want to turn it with normal's reference
offset_phi2 = random.randrange(angle_min, angle_max, 20)/100 #Idem but with circle's normal
print('param2', phi2, theta2, offset_phi2, offset_theta2)
#You have to create a cutting pattern in 2D
pt1_2, pt2_2, pt3_2, pt4_2 = volmdlr.Point2D((offset_theta2, offset_phi2)), volmdlr.Point2D((offset_theta2, offset_phi2+phi2)), volmdlr.Point2D((offset_theta2+theta2, offset_phi2+phi2)), volmdlr.Point2D((offset_theta2+theta2, offset_phi2))
seg1_2, seg2_2, seg3_2, seg4_2 = volmdlr.LineSegment2D(pt1_2, pt2_2), volmdlr.LineSegment2D(pt2_2, pt3_2), volmdlr.LineSegment2D(pt3_2, pt4_2), volmdlr.LineSegment2D(pt4_2, pt1_2)
edges_2 = [seg1_2, seg2_2, seg3_2, seg4_2]
contours2d_2 = [volmdlr.Contour2D(edges_2)]
points_2 = [theta2, phi2]
toroidalface1 = volmdlr.ToroidalFace3D(contours2d, toresurface1, points)
toroidalface2 = volmdlr.ToroidalFace3D(contours2d_2, toresurface2, points_2)
pts1, tangle1 = toroidalface1.triangulation(resolution=10)
pts2, tangle2 = toroidalface2.triangulation(resolution=10)
p1, p2 = toroidalface1.minimum_distance_points_tore(toroidalface2)
print('p1, p2', p1,p2)
print(p1.point_distance(p2))
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# [pt.MPLPlot(ax=ax) for pt in pts1]
# [pt.MPLPlot(ax=ax) for pt in pts2]
# p1.MPLPlot(ax=ax, color='r')
# p2.MPLPlot(ax=ax, color='b')
# toroidalface1.start.MPLPlot(ax=ax, color='m')
# toroidalface2.start.MPLPlot(ax=ax, color='g')
# LS = volmdlr.LineSegment3D(p1, p2)
shell = volmdlr.Shell3D([toroidalface1,toroidalface2])
vol = volmdlr.VolumeModel([shell, p1, p2])
vol.babylonjs_from_script()
# m = volmdlr.VolumeModel([shell])
# m.babylonjs()
| gpl-3.0 | 296,104,524,173,989,760 | 45.75 | 239 | 0.73861 | false |
thinkobscure/PantheROV | topside/SDL/endian.py | 1 | 1027 | #!/usr/bin/env python
'''Functions for converting to native byte order
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
import SDL.constants
def SDL_Swap16(x):
return (x << 8 & 0xff00) | \
(x >> 8 & 0x00ff)
def SDL_Swap32(x):
return (x << 24 & 0xff000000) | \
(x << 8 & 0x00ff0000) | \
(x >> 8 & 0x0000ff00) | \
(x >> 24 & 0x000000ff)
def SDL_Swap64(x):
return (SDL_Swap32(x & 0xffffffff) << 32) | \
(SDL_Swap32(x >> 32 & 0xffffffff))
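# Worked examples: SDL_Swap16(0x1234) returns 0x3412 and SDL_Swap32(0x12345678)
# returns 0x78563412; each helper simply reverses the byte order of its
# fixed-width integer argument.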
def _noop(x):
return x
if sys.byteorder == 'big':
SDL_BYTEORDER = SDL.constants.SDL_BIG_ENDIAN
SDL_SwapLE16 = SDL_Swap16
SDL_SwapLE32 = SDL_Swap32
SDL_SwapLE64 = SDL_Swap64
SDL_SwapBE16 = _noop
SDL_SwapBE32 = _noop
SDL_SwapBE64 = _noop
else:
SDL_BYTEORDER = SDL.constants.SDL_LIL_ENDIAN
SDL_SwapLE16 = _noop
SDL_SwapLE32 = _noop
SDL_SwapLE64 = _noop
SDL_SwapBE16 = SDL_Swap16
SDL_SwapBE32 = SDL_Swap32
SDL_SwapBE64 = SDL_Swap64
| gpl-3.0 | -3,626,432,104,365,442,600 | 21.326087 | 49 | 0.584226 | false |
gmariotti/lassim | source/core/lassim_context.py | 1 | 4587 | from logging import Logger
from typing import Dict, Type, Callable, List, Optional
import psutil
from core.base_solution import BaseSolution
from core.core_system import CoreSystem
from core.utilities.type_aliases import Vector
__author__ = "Guido Pio Mariotti"
__copyright__ = "Copyright (C) 2016 Guido Pio Mariotti"
__license__ = "GNU General Public License v3.0"
__version__ = "0.2.0"
class LassimContext:
"""
    Represents the context of the current optimization. Allows dependency
    injection of common parameters, such as the class that represents the
    solutions and the ODE function to use.
"""
def __init__(self, core: CoreSystem, primary_opt: List['OptimizationArgs'],
ode_fun: Callable[..., Vector], pert_fun: Callable[..., float],
solution_class: Type[BaseSolution],
secondary_opt: List['OptimizationArgs'] = None):
self.__core_system = core
if len(primary_opt) == 0:
raise ValueError("Primary optimization list can't be empty")
self.__primary_opt = primary_opt
self.__ode_function = ode_fun
self.__pert_function = pert_fun
self.__solution_class = solution_class
self.__secondary_opt = list()
if secondary_opt is not None:
self.__secondary_opt = secondary_opt
@property
def core(self) -> CoreSystem:
return self.__core_system
@property
def primary_opts(self) -> List['OptimizationArgs']:
# recreate the list in order to not allowing the possibility to modify
# the main one
return [val for val in self.__primary_opt]
@property
def primary_first(self) -> 'OptimizationArgs':
return self.primary_opts[0]
@property
def secondary_opts(self) -> List['OptimizationArgs']:
return [val for val in self.__secondary_opt]
# FIXME - use my Optional
@property
def secondary_first(self) -> Optional['OptimizationArgs']:
if len(self.secondary_opts) > 0:
return self.secondary_opts[0]
else:
return None
@property
def ode(self) -> Callable[..., Vector]:
return self.__ode_function
@property
def perturbation(self) -> Callable[..., float]:
return self.__pert_function
@property
def SolutionClass(self) -> Type[BaseSolution]:
return self.__solution_class
def __str__(self) -> str:
# TODO
return "LassimContext"
__repr__ = __str__
class OptimizationArgs:
"""
This class represents the list of arguments for an optimization. Except for
the number of cores, each argument is read-only and is initialized at class
instantiation.
"""
def __init__(self, opt_type: str, params: Dict, num_cores: int,
evolutions: int, individuals: int, pert_factor: float):
self.__type = opt_type
self.__params = params
self.__islands = num_cores
self.__evolutions = evolutions
self.__individuals = individuals
self.__pert_factor = pert_factor
@property
def type(self) -> str:
return self.__type
@property
def params(self) -> Dict:
return self.__params
@property
def num_islands(self) -> int:
return self.__islands
@num_islands.setter
def num_islands(self, num_islands: int):
# if the number is less than one, then use all the CPUs available
if num_islands < 1:
            self.__islands = psutil.cpu_count()
        else:
            self.__islands = num_islands
@property
def num_evolutions(self) -> int:
return self.__evolutions
@property
def num_individuals(self) -> int:
return self.__individuals
@property
def pert_factor(self) -> float:
return self.__pert_factor
def log_args(self, logger: Logger, is_pert: bool = False):
"""
Used to log the optimization arguments inserted by the user.
:param logger: the logging object to use
        :param is_pert: whether the perturbation factor should also be logged.
"""
logger.info("Algorithm used is {}".format(self.__type))
logger.info("Number of cores is {}".format(self.__islands))
logger.info("Number of evolutions for archipelago is {}".format(
self.__evolutions
))
logger.info("Number of individuals for each island is {}".format(
self.__individuals
))
if is_pert:
logger.info("Perturbations factor is {}".format(self.__pert_factor))
| gpl-3.0 | 5,525,869,121,874,791,000 | 30.417808 | 80 | 0.615871 | false |
jingriver/stocktracker | pytoolkit/randombitmap/bitmap.py | 1 | 1072 | from PIL import Image
import random
size = (640, 640)
black = (0,0,0)
white = (255,255,255)
def draw(size):
im = Image.new("RGB", size)
ll = []
for i in range(size[0]):
for j in range(size[1]):
if random.random()>0.5:
ll.append(white)
else:
ll.append(black)
im.putdata(ll)
im.show()
im.save("1.png")
def drawColor(size):
im = Image.new("RGB", size)
ll = []
for i in range(size[0]):
for j in range(size[1]):
ll.append((random.randint(1,255),random.randint(1, 255),random.randint(1,255)))
im.putdata(ll)
im.show()
im.save("2.png")
def drawStyle(size):
im = Image.new("RGB", size)
ll = []
for i in range(size[0]):
for j in range(size[1]):
c = (i+j)%255
ll.append((i%255,c,j%255))
im.putdata(ll)
im.show()
im.save("3.png")
if __name__ == "__main__":
draw(size)
drawColor(size)
drawStyle(size) | mit | 6,033,678,608,516,749,000 | 20.375 | 91 | 0.476679 | false |
wangkua1/sportvu | sportvu/data/extractor.py | 1 | 17452 | from __future__ import division
import cPickle as pickle
import yaml
import os
from sportvu import data
import numpy as np
import yaml
from utils import (pictorialize_team, pictorialize_fast,
make_3teams_11players, make_reference, scale_last_dim)
game_dir = data.constant.game_dir
class ExtractorException(Exception):
pass
class BaseExtractor(object):
"""base class for sequence extraction
Input: a truncated Event
Output: classifier model input
Simplest possible parametrization, a collapsed image of the full court
Options (all 3 can be used in conjunction):
-d0flip
-d1flip
-jitter (x,y)
returns a 3 channel image of (ball, offense, defense)
    -> resolves possession: which side of the court it's on
"""
def __init__(self, f_config):
self.augment = True
if type(f_config) == str:
self.config = yaml.load(open(f_config, 'rb'))['extractor_config']
else:
self.config = f_config['extractor_config']
def extract_raw(self, event, dont_resolve_basket=False):
"""
"""
##
moments = event.moments
off_is_home = event.is_home_possession(moments[len(moments) // 2])
ball, offense, defense = [[]], [
[], [], [], [], []], [[], [], [], [], []]
for moment in moments:
ball[0].append([moment.ball.x, moment.ball.y])
off_id, def_id = 0, 0
for player_idx, player in enumerate(moment.players):
if dont_resolve_basket:
if player_idx < 5:
offense[off_id].append([player.x, player.y])
off_id += 1
else:
defense[def_id].append([player.x, player.y])
def_id += 1
else:
if (player.team.id == event.home_team_id) == off_is_home: # offense
offense[off_id].append([player.x, player.y])
off_id += 1
else: # defense
defense[def_id].append([player.x, player.y])
def_id += 1
if ( len(ball) == 0 or
(not ((len(np.array(ball).shape) == 3
and len(np.array(offense).shape) == 3
and len(np.array(defense).shape) == 3)
and
(np.array(ball).shape[1] == np.array(offense).shape[1]
and np.array(offense).shape[1] == np.array(defense).shape[1]))
)
):
raise ExtractorException()
return [ball, offense, defense]
def extract(self, event):
x = self.extract_raw(event)
ctxy = []
if self.augment and np.sum(self.config['jitter']) > 0:
d0_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][0]
d1_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][1]
jit = np.array([d0_jit, d1_jit])
jit = jit.reshape(1, 2).repeat(len(x[0][0]), axis=0)
for team in x:
for player in team:
try:
player = np.array(player) + jit
except ValueError: # bad sequence where not all players have the same number of moments
raise ExtractorException()
for play_sequence in x:
try:
team_matrix = np.concatenate(play_sequence, 1)
except ValueError:
raise ExtractorException()
tm = pictorialize_team(team_matrix, sample_rate=self.config['sample_rate'],
Y_RANGE=self.config[
'Y_RANGE'], X_RANGE=self.config['X_RANGE'],
radius=self.config['radius'])
ctxy.append(tm)
ctxy = np.array(ctxy)
if len(ctxy.shape) == 1: # different teams have different length
raise ExtractorException()
# compress the time dimension
if 'video' in self.config and self.config['video']:
            if self.augment and self.config['d0flip'] and np.random.rand() > .5:
                ctxy = ctxy[:, :, ::-1]
            if self.augment and self.config['d1flip'] and np.random.rand() > .5:
                ctxy = ctxy[:, :, :, ::-1]
return ctxy
else:
cxy = ctxy.sum(1)
cxy[cxy > 1] = 1
            if self.augment and self.config['d0flip'] and np.random.rand() > .5:
                cxy = cxy[:, ::-1]
            if self.augment and self.config['d1flip'] and np.random.rand() > .5:
                cxy = cxy[:, :, ::-1]
return cxy
def extract_batch(self, events_arr, input_is_sequence=False, dont_resolve_basket=False):
sample_rate = 1
Y_RANGE = 100
X_RANGE = 50
if input_is_sequence:
sequences = events_arr
else:
sequences = np.array([make_3teams_11players(
self.extract_raw(e,dont_resolve_basket=dont_resolve_basket)) for e in events_arr])
# time crop (+jitter) , spatial crop
if 'version' in self.config and self.config['version'] >= 2:
if self.augment:
t_jit = np.min([self.config['tfa_jitter_radius'],
sequences.shape[2] / 2 - self.config['tfr']])
t_jit = (2 * t_jit * np.random.rand()
).round().astype('int32') - t_jit
else:
t_jit = 0
tfa = int(sequences.shape[2] / 2 + t_jit)
sequences = sequences[:, :, tfa -
self.config['tfr']:tfa + self.config['tfr']]
if 'crop' in self.config and self.config['crop'] != '':
reference = make_reference(sequences, self.config[
'crop_size'], self.config['crop'])
sequences = sequences - reference
Y_RANGE = self.config['crop_size'][0] + 2
X_RANGE = self.config['crop_size'][1] + 2
# spatial jitter
if self.augment and np.sum(self.config['jitter']) > 0:
d0_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][0]
d1_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][1]
# hacky: can delete after -- temporary for malformed data (i.e.
# missing player)
try:
sequences[:, :, :, 0] += d0_jit
except:
raise ExtractorException()
sequences[:, :, :, 1] += d1_jit
##
bctxy = pictorialize_fast(sequences, sample_rate, Y_RANGE, X_RANGE)
# if cropped, shave off the extra padding
if ('version' in self.config and self.config['version'] >= 2
and 'crop' in self.config):
bctxy = bctxy[:, :, :, 1:-1, 1:-1]
# compress the time dimension
if 'video' in self.config and self.config['video']:
            if self.augment and self.config['d0flip'] and np.random.rand() > .5:
                bctxy = bctxy[:, :, :, ::-1]
            if self.augment and self.config['d1flip'] and np.random.rand() > .5:
                bctxy = bctxy[:, :, :, :, ::-1]
return bctxy
else:
bcxy = bctxy.sum(2)
bcxy[bcxy > 1] = 1
            if self.augment and self.config['d0flip'] and np.random.rand() > .5:
                bcxy = bcxy[:, :, ::-1]
            if self.augment and self.config['d1flip'] and np.random.rand() > .5:
                bcxy = bcxy[:, :, :, ::-1]
return bcxy
class Seq2SeqExtractor(BaseExtractor):
"""
"""
def __init__(self, f_config):
# super(Seq2SeqExtractor, self).__init__(f_config)
super(self.__class__, self).__init__(f_config)
def extract_batch(self, events_arr, input_is_sequence=False, dont_resolve_basket=False):
"""
Say, enc_time = (10) 0-10
dec_time = (10) 11-20
dec_target_sequence = (10) 11-20
decoder_output = (10) 12-21
"""
sample_rate = 1
Y_RANGE = 100
X_RANGE = 50
if input_is_sequence:
sequences = events_arr
else:
sequences = np.array([make_3teams_11players(
self.extract_raw(e,dont_resolve_basket=dont_resolve_basket)) for e in events_arr])
# spatial jitter
if self.augment and np.sum(self.config['jitter']) > 0:
d0_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][0]
d1_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][1]
# hacky: can delete after -- temporary for malformed data (i.e.
# missing player)
try:
sequences[:, :, :, 0] += d0_jit
except:
raise ExtractorException()
sequences[:, :, :, 1] += d1_jit
## temporal segment
target_player_ind = np.random.randint(1,6)
N_total_frames = sequences.shape[2]
start_time = np.round((np.random.rand() * (N_total_frames -
(1+self.config['encoder_input_time']+self.config['decoder_input_time']))
)).astype('int32')
input_seq = sequences[:, :, start_time:start_time+self.config['encoder_input_time']+self.config['decoder_input_time']]
dec_target_sequence = sequences[:, target_player_ind, start_time+self.config['encoder_input_time']
:start_time+self.config['encoder_input_time']+self.config['decoder_input_time']]
output_m1 = sequences[:, target_player_ind, start_time+self.config['encoder_input_time']
:1+start_time+self.config['encoder_input_time']+self.config['decoder_input_time']]
output = output_m1[:,1:] - output_m1[:,:-1]
##
bctxy = pictorialize_fast(input_seq, sample_rate, Y_RANGE, X_RANGE, keep_channels=True)
        if self.augment and self.config['d0flip'] and np.random.rand() > .5:
            bctxy = bctxy[:, :, :, ::-1]
        if self.augment and self.config['d1flip'] and np.random.rand() > .5:
            bctxy = bctxy[:, :, :, :, ::-1]
seq_inp = np.zeros((bctxy.shape[0], 4, self.config['encoder_input_time']+self.config['decoder_input_time'], Y_RANGE, X_RANGE))
#target player
seq_inp[:,0] = bctxy[:,target_player_ind]
#ball
seq_inp[:,1] = bctxy[:,0]
#team
seq_inp[:,2] = np.concatenate([bctxy[:,1:target_player_ind], bctxy[:,target_player_ind+1:6]], axis=1).sum(1)
#defense
seq_inp[:,3] = bctxy[:,6:].sum(1)
enc_inp = seq_inp[:,:,:self.config['encoder_input_time']]
dec_inp = seq_inp[:,:,self.config['encoder_input_time']:]
return enc_inp, dec_inp, dec_target_sequence, output
class EncDecExtractor(BaseExtractor):
"""
"""
def __init__(self, f_config):
super(self.__class__, self).__init__(f_config)
def extract_batch(self, events_arr, input_is_sequence=False, player_id=None, dont_resolve_basket=False):
"""
Say, enc_time = (10) 0-10
dec_time = (10) (11-10) - (20-19)
decoder_output = (10) (12-11)-(21-20)
"""
sample_rate = 1
Y_RANGE = 100
X_RANGE = 50
if input_is_sequence:
sequences = events_arr
else:
sequences = np.array([make_3teams_11players(
self.extract_raw(e, dont_resolve_basket=dont_resolve_basket)) for e in events_arr])
# spatial jitter
if self.augment and np.sum(self.config['jitter']) > 0:
d0_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][0]
d1_jit = (np.random.rand() * 2 - 1) * self.config['jitter'][1]
# hacky: can delete after -- temporary for malformed data (i.e.
# missing player)
try:
sequences[:, :, :, 0] += d0_jit
except:
raise ExtractorException()
sequences[:, :, :, 1] += d1_jit
## temporal segment
if player_id is None:
target_player_ind = np.random.randint(1,6)
else:
target_player_ind = player_id
N_total_frames = sequences.shape[2]
if self.augment:
start_time = 1+np.round((np.random.rand() * (N_total_frames -
(2+self.config['encoder_input_time']+self.config['decoder_input_time']))
)).astype('int32')
else:
start_time = N_total_frames // 2 - self.config['encoder_input_time']
input_seq_m1 = np.array(sequences,copy=True)[:, :, start_time-1:start_time+self.config['encoder_input_time']]
output_m1 = np.array(sequences,copy=True)[:, target_player_ind,
-1+start_time+self.config['encoder_input_time']
:1+start_time+self.config['encoder_input_time']+self.config['decoder_input_time']]
output = output_m1[:,2:] - output_m1[:,1:-1]
dec_input = output_m1[:,1:-1] - output_m1[:,:-2]
## Encoder Input
if 'encoder_type' in self.config:
if self.config['encoder_type'] == 'target-seq':
abs_seq = input_seq_m1[:, target_player_ind, 1:]
abs_seq = scale_last_dim(abs_seq)
m1_v_seq = input_seq_m1[:, target_player_ind, 1:] - input_seq_m1[:, target_player_ind, :-1]
enc_input = np.concatenate([abs_seq, m1_v_seq], axis=-1)
return dec_input, output, enc_input, (sequences[:, :, start_time:start_time+self.config['encoder_input_time']], target_player_ind)
# , sequences[:, :, start_time+self.config['encoder_input_time']:start_time+self.config['encoder_input_time']+self.config['decoder_input_time']])
elif self.config['encoder_type'] in ['3d', '2d']:
bctxy = pictorialize_fast(input_seq_m1, sample_rate, Y_RANGE, X_RANGE, keep_channels=True)
# if self.augment and self.config['d0flip'] and np.random.rand > .5:
# bctxy = bctxy[:, :, :, ::-1]
# if self.augment and self.config['d1flip'] and np.random.rand > .5:
# bctxy = bctxy[:, :, :, :, ::-1]
seq_inp = np.zeros((bctxy.shape[0], 4, input_seq_m1.shape[2], Y_RANGE, X_RANGE))
#target player
seq_inp[:,0] = bctxy[:,target_player_ind]
#ball
seq_inp[:,1] = bctxy[:,0]
#team
seq_inp[:,2] = np.concatenate([bctxy[:,1:target_player_ind], bctxy[:,target_player_ind+1:6]], axis=1).sum(1)
#defense
seq_inp[:,3] = bctxy[:,6:].sum(1)
if self.config['encoder_type'] == '2d':
seq_inp = seq_inp.sum(2)[None]
seq_inp = np.transpose(seq_inp, (1,2,0,3,4))
seq_inp[seq_inp>1] = 1
return dec_input, output, seq_inp, (sequences[:, :, start_time:start_time+self.config['encoder_input_time']], target_player_ind)
else: #NO encoder
return dec_input, output, None, (sequences[:, :, start_time:start_time+self.config['encoder_input_time']], target_player_ind)
"""
HalfCourt Extractor
This extractor takes the Event
1. flips basket so everything lives in a halfcourt
(if sequence crossed both halves, it's not what we care about anyways
, so it's okay to randomly chop them off)
"""
"""
Ball Extractor
"""
"""
ImagePyramid Extractor
"""
if __name__ == '__main__':
from sportvu.data.dataset import BaseDataset
from sportvu.data.extractor import BaseExtractor
from loader import BaseLoader, Seq2SeqLoader
##
# f_config = 'config/train_rev0.yaml'
# dataset = BaseDataset(f_config, 0)
# extractor = BaseExtractor(f_config)
# loader = BaseLoader(dataset, extractor, 35, fraction_positive=0)
# print ('testing next_batch')
# batch = loader.next_batch(extract=False)
# for eind, event in enumerate(batch[0]):
# event.show('/home/wangkua1/Pictures/vis/%i.mp4' % eind)
f_config = 'config/rev3-dec-single-frame.yaml'
dataset = BaseDataset(f_config, 0)
extractor = EncDecExtractor(f_config)
loader = Seq2SeqLoader(dataset, extractor, 100, fraction_positive=0)
print ('testing next_batch')
batch = loader.next()
# for eind, event in enumerate(batch[0]):
# event.show('/home/wangkua1/Pictures/vis/%i.mp4' % eind)
# visualize model input
# import matplotlib.pyplot as plt
# plt.ion()
# for x in batch[0]:
# img = np.rollaxis(x, 0, 3)
# plt.imshow(img)
# raw_input()
# ## concurrent
# import sys
# sys.path.append('/home/wangkua1/toolboxes/resnet')
# from resnet.utils.concurrent_batch_iter import ConcurrentBatchIterator
# from tqdm import tqdm
# print ("compare loading latency")
# Q_size = 100
# N_thread = 32
# cloader = ConcurrentBatchIterator(loader, max_queue_size=Q_size, num_threads=N_thread)
# N = 100
# for i in tqdm(xrange(N), desc='multi thread Q size %i, N thread %i'%(Q_size, N_thread)):
# b = cloader.next()
# for i in tqdm(xrange(N), desc='single thread'):
# b = loader.next()
| mit | 1,836,699,956,899,260,200 | 43.182278 | 169 | 0.527962 | false |
CollabQ/CollabQ | .google_appengine/google/appengine/api/images/images_stub.py | 1 | 15868 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the images API."""
import logging
import StringIO
try:
import PIL
from PIL import _imaging
from PIL import Image
except ImportError:
import _imaging
import Image
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import blobstore
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import images
from google.appengine.api.images import images_service_pb
from google.appengine.runtime import apiproxy_errors
def _ArgbToRgbaTuple(argb):
"""Convert from a single ARGB value to a tuple containing RGBA.
Args:
argb: Signed 32 bit integer containing an ARGB value.
Returns:
RGBA tuple.
"""
unsigned_argb = argb % 0x100000000
return ((unsigned_argb >> 16) & 0xFF,
(unsigned_argb >> 8) & 0xFF,
unsigned_argb & 0xFF,
(unsigned_argb >> 24) & 0xFF)
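# Worked example: _ArgbToRgbaTuple(0x80FF0000) returns (255, 0, 0, 128); the
# alpha byte moves from the top of the ARGB word to the end of the RGBA tuple.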
class ImagesServiceStub(apiproxy_stub.APIProxyStub):
"""Stub version of images API to be used with the dev_appserver."""
def __init__(self, service_name="images"):
"""Preloads PIL to load all modules in the unhardened environment.
Args:
service_name: Service name expected for all calls.
"""
super(ImagesServiceStub, self).__init__(service_name)
Image.init()
def _Dynamic_Composite(self, request, response):
"""Implementation of ImagesService::Composite.
Based off documentation of the PIL library at
http://www.pythonware.com/library/pil/handbook/index.htm
Args:
request: ImagesCompositeRequest, contains image request info.
response: ImagesCompositeResponse, contains transformed image.
"""
width = request.canvas().width()
height = request.canvas().height()
color = _ArgbToRgbaTuple(request.canvas().color())
canvas = Image.new("RGBA", (width, height), color)
sources = []
if (not request.canvas().width() or request.canvas().width() > 4000 or
not request.canvas().height() or request.canvas().height() > 4000):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if not request.image_size():
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if not request.options_size():
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if request.options_size() > images.MAX_COMPOSITES_PER_REQUEST:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
for image in request.image_list():
sources.append(self._OpenImageData(image))
for options in request.options_list():
if (options.anchor() < images.TOP_LEFT or
options.anchor() > images.BOTTOM_RIGHT):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if options.source_index() >= len(sources) or options.source_index() < 0:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if options.opacity() < 0 or options.opacity() > 1:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
source = sources[options.source_index()]
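      # The anchor constants form a 3x3 grid ordered left-to-right then
      # top-to-bottom, so anchor % 3 picks the horizontal fraction (0, 0.5 or
      # 1) of the free space and anchor / 3 (integer division here) the
      # vertical fraction.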
x_anchor = (options.anchor() % 3) * 0.5
y_anchor = (options.anchor() / 3) * 0.5
x_offset = int(options.x_offset() + x_anchor * (width - source.size[0]))
y_offset = int(options.y_offset() + y_anchor * (height - source.size[1]))
alpha = options.opacity() * 255
mask = Image.new("L", source.size, alpha)
canvas.paste(source, (x_offset, y_offset), mask)
response_value = self._EncodeImage(canvas, request.canvas().output())
response.mutable_image().set_content(response_value)
def _Dynamic_Histogram(self, request, response):
"""Trivial implementation of ImagesService::Histogram.
Based off documentation of the PIL library at
http://www.pythonware.com/library/pil/handbook/index.htm
Args:
request: ImagesHistogramRequest, contains the image.
response: ImagesHistogramResponse, contains histogram of the image.
"""
image = self._OpenImageData(request.image())
img_format = image.format
if img_format not in ("BMP", "GIF", "ICO", "JPEG", "PNG", "TIFF"):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.NOT_IMAGE)
image = image.convert("RGBA")
red = [0] * 256
green = [0] * 256
blue = [0] * 256
for pixel in image.getdata():
red[int((pixel[0] * pixel[3]) / 255)] += 1
green[int((pixel[1] * pixel[3]) / 255)] += 1
blue[int((pixel[2] * pixel[3]) / 255)] += 1
histogram = response.mutable_histogram()
for value in red:
histogram.add_red(value)
for value in green:
histogram.add_green(value)
for value in blue:
histogram.add_blue(value)
def _Dynamic_Transform(self, request, response):
"""Trivial implementation of ImagesService::Transform.
Based off documentation of the PIL library at
http://www.pythonware.com/library/pil/handbook/index.htm
Args:
request: ImagesTransformRequest, contains image request info.
response: ImagesTransformResponse, contains transformed image.
"""
original_image = self._OpenImageData(request.image())
new_image = self._ProcessTransforms(original_image,
request.transform_list())
response_value = self._EncodeImage(new_image, request.output())
response.mutable_image().set_content(response_value)
def _EncodeImage(self, image, output_encoding):
"""Encode the given image and return it in string form.
Args:
image: PIL Image object, image to encode.
output_encoding: ImagesTransformRequest.OutputSettings object.
Returns:
str with encoded image information in given encoding format.
"""
image_string = StringIO.StringIO()
image_encoding = "PNG"
if (output_encoding.mime_type() == images_service_pb.OutputSettings.JPEG):
image_encoding = "JPEG"
image = image.convert("RGB")
image.save(image_string, image_encoding)
return image_string.getvalue()
def _OpenImageData(self, image_data):
"""Open image data from ImageData protocol buffer.
Args:
image_data: ImageData protocol buffer containing image data or blob
reference.
Returns:
Image containing the image data passed in or reference by blob-key.
Raises:
ApplicationError if both content and blob-key are provided.
NOTE: 'content' must always be set because it is a required field,
however, it must be the empty string when a blob-key is provided.
"""
if image_data.content() and image_data.has_blob_key():
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.INVALID_BLOB_KEY)
if image_data.has_blob_key():
image = self._OpenBlob(image_data.blob_key())
else:
image = self._OpenImage(image_data.content())
img_format = image.format
if img_format not in ("BMP", "GIF", "ICO", "JPEG", "PNG", "TIFF"):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.NOT_IMAGE)
return image
def _OpenImage(self, image):
"""Opens an image provided as a string.
Args:
image: image data to be opened
Raises:
apiproxy_errors.ApplicationError if the image cannot be opened or if it
is an unsupported format.
Returns:
Image containing the image data passed in.
"""
if not image:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.NOT_IMAGE)
image = StringIO.StringIO(image)
try:
return Image.open(image)
except IOError:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA)
def _OpenBlob(self, blob_key):
key = datastore_types.Key.from_path(blobstore.BLOB_INFO_KIND, blob_key)
try:
datastore.Get(key)
except datastore_errors.Error:
logging.exception('Blob with key %r does not exist', blob_key)
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR)
blobstore_stub = apiproxy_stub_map.apiproxy.GetStub("blobstore")
try:
blob_file = blobstore_stub.storage.OpenBlob(blob_key)
except IOError:
logging.exception('Could not get file for blob_key %r', blob_key)
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA)
try:
return Image.open(blob_file)
except IOError:
logging.exception('Could not open image %r for blob_key %r',
blob_file, blob_key)
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA)
def _ValidateCropArg(self, arg):
"""Check an argument for the Crop transform.
Args:
arg: float, argument to Crop transform to check.
Raises:
apiproxy_errors.ApplicationError on problem with argument.
"""
if not isinstance(arg, float):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if not (0 <= arg <= 1.0):
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
def _CalculateNewDimensions(self,
current_width,
current_height,
req_width,
req_height):
"""Get new resize dimensions keeping the current aspect ratio.
    This uses the more restrictive of the two requested values to determine
the new ratio.
Args:
current_width: int, current width of the image.
current_height: int, current height of the image.
req_width: int, requested new width of the image.
req_height: int, requested new height of the image.
Returns:
tuple (width, height) which are both ints of the new ratio.
"""
width_ratio = float(req_width) / current_width
height_ratio = float(req_height) / current_height
if req_width == 0 or (width_ratio > height_ratio and req_height != 0):
return int(height_ratio * current_width), req_height
else:
return req_width, int(width_ratio * current_height)
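  # Worked example: a 400x200 image with req_width=100 and req_height=100 gives
  # width_ratio=0.25, the more restrictive value, so the method returns
  # (100, 50) and the aspect ratio is preserved.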
def _Resize(self, image, transform):
"""Use PIL to resize the given image with the given transform.
Args:
image: PIL.Image.Image object to resize.
transform: images_service_pb.Transform to use when resizing.
Returns:
PIL.Image.Image with transforms performed on it.
Raises:
BadRequestError if the resize data given is bad.
"""
width = 0
height = 0
if transform.has_width():
width = transform.width()
if width < 0 or 4000 < width:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
if transform.has_height():
height = transform.height()
if height < 0 or 4000 < height:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
current_width, current_height = image.size
new_width, new_height = self._CalculateNewDimensions(current_width,
current_height,
width,
height)
return image.resize((new_width, new_height), Image.ANTIALIAS)
def _Rotate(self, image, transform):
"""Use PIL to rotate the given image with the given transform.
Args:
image: PIL.Image.Image object to rotate.
transform: images_service_pb.Transform to use when rotating.
Returns:
PIL.Image.Image with transforms performed on it.
Raises:
BadRequestError if the rotate data given is bad.
"""
degrees = transform.rotate()
if degrees < 0 or degrees % 90 != 0:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
degrees %= 360
degrees = 360 - degrees
return image.rotate(degrees)
def _Crop(self, image, transform):
"""Use PIL to crop the given image with the given transform.
Args:
image: PIL.Image.Image object to crop.
transform: images_service_pb.Transform to use when cropping.
Returns:
PIL.Image.Image with transforms performed on it.
Raises:
BadRequestError if the crop data given is bad.
"""
left_x = 0.0
top_y = 0.0
right_x = 1.0
bottom_y = 1.0
if transform.has_crop_left_x():
left_x = transform.crop_left_x()
self._ValidateCropArg(left_x)
if transform.has_crop_top_y():
top_y = transform.crop_top_y()
self._ValidateCropArg(top_y)
if transform.has_crop_right_x():
right_x = transform.crop_right_x()
self._ValidateCropArg(right_x)
if transform.has_crop_bottom_y():
bottom_y = transform.crop_bottom_y()
self._ValidateCropArg(bottom_y)
width, height = image.size
box = (int(transform.crop_left_x() * width),
int(transform.crop_top_y() * height),
int(transform.crop_right_x() * width),
int(transform.crop_bottom_y() * height))
return image.crop(box)
def _ProcessTransforms(self, image, transforms):
"""Execute PIL operations based on transform values.
Args:
image: PIL.Image.Image instance, image to manipulate.
      transforms: list of ImagesTransformRequest.Transform objects.
Returns:
PIL.Image.Image with transforms performed on it.
Raises:
BadRequestError if we are passed more than one of the same type of
transform.
"""
new_image = image
if len(transforms) > images.MAX_TRANSFORMS_PER_REQUEST:
raise apiproxy_errors.ApplicationError(
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
for transform in transforms:
if transform.has_width() or transform.has_height():
new_image = self._Resize(new_image, transform)
elif transform.has_rotate():
new_image = self._Rotate(new_image, transform)
elif transform.has_horizontal_flip():
new_image = new_image.transpose(Image.FLIP_LEFT_RIGHT)
elif transform.has_vertical_flip():
new_image = new_image.transpose(Image.FLIP_TOP_BOTTOM)
elif (transform.has_crop_left_x() or
transform.has_crop_top_y() or
transform.has_crop_right_x() or
transform.has_crop_bottom_y()):
new_image = self._Crop(new_image, transform)
elif transform.has_autolevels():
logging.info("I'm Feeling Lucky autolevels will be visible once this "
"application is deployed.")
else:
logging.warn("Found no transformations found to perform.")
return new_image
| apache-2.0 | -8,046,123,606,114,034,000 | 33.051502 | 79 | 0.664104 | false |
chappers/sklearn-recipes | streaming_take2/dpp_classifier_dpp_only.py | 1 | 13063 | import sklearn
from sklearn.datasets import make_regression, make_classification
from sklearn.linear_model import SGDRegressor, SGDClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import euclidean_distances
import pandas as pd
import numpy as np
from scipy import stats
from scipy.stats import wilcoxon
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.decomposition import PCA, KernelPCA
from sklearn.kernel_approximation import Nystroem
from dpp import sample_dpp, decompose_kernel, sample_conditional_dpp
import random
from collections import Counter
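# Note on fast_euclid below: it recovers (approximate) squared Euclidean
# distances from an RBF kernel. Since K = exp(-gamma * ||x - y||^2), taking
# -log(K) / gamma gives the squared distances back; the Nystroem branch is a
# low-rank approximation of K used for larger inputs.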
def fast_euclid(X):
gamma = 1.0/X.shape[1]
if X.shape[0] < 1000:
L = rbf_kernel(X, gamma=gamma)
else:
L = Nystroem(gamma=gamma).fit_transform(X)
L = L.dot(L.T)
Ls = np.log(L)*(-1.0/(gamma))
return Ls
def class_separability(X, y, mode='mitra'):
"""
Calculates the class separability based on the mitra paper
"""
from ogfs_classifier import spec_supervised
return spec_supervised(X, y)
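    # NOTE: the early return above delegates entirely to spec_supervised, so
    # the Mitra-style scatter-matrix computation below is never reached in
    # this version.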
# get prior probs
prior_proba = Counter(y)
s_w = []
s_b = []
m_o = np.mean(X, axis=0).reshape(-1, 1)
if X.shape[0] > 1000:
mode = 'kernel'
for class_ in prior_proba.keys():
mask = y==class_
X_sel = X[mask, :]
if mode == 'mitra':
cov_sig = np.cov(X_sel.T)
s_w.append(cov_sig * prior_proba[class_])
else:
K = fast_euclid(X_sel.T)
s_w.append(K * prior_proba[class_])
mu_m = prior_proba[class_] - m_o
s_b.append(np.dot(mu_m, mu_m.T))
s_w = np.atleast_2d(np.add(*s_w))
s_b = np.add(*s_b)
return s_b, s_w
def evaluate_feats0(s_b, s_w):
curr_u1 = []
curr_u2 = []
my_feats = []
prev_score = None
try:
s_b_inv = np.linalg.inv(s_b)
except:
s_b_inv = np.linalg.pinv(s_b)
S = np.trace(np.dot(s_b_inv, s_w))
eval_order = np.argsort(S).flatten()
for idx in list(eval_order):
if prev_score is None:
curr_u1.append(s_b[idx])
curr_u2.append(s_w[idx])
my_feats.append(idx)
else:
test_u1 = curr_u1[:]
test_u2 = curr_u2[:]
test_u1.append(s_b[idx])
test_u2.append(s_w[idx])
score = (prev_score - (np.sum(test_u1)/np.sum(test_u2)))
if score > 0.001:
my_feats.append(idx)
curr_u1.append(s_b[idx])
curr_u2.append(s_w[idx])
prev_score = np.sum(curr_u1)/np.sum(curr_u2)
return list(my_feats)
def evaluate_feats1(s_b, s_w, highest_best=True):
curr_u1 = []
curr_u2 = []
my_feats = []
prev_score = None
X = s_b/s_w
eval_order = np.argsort(X).flatten()
if highest_best:
eval_order = eval_order[::-1]
for idx in list(eval_order):
if prev_score is None:
curr_u1.append(s_b[idx])
curr_u2.append(s_w[idx])
my_feats.append(idx)
else:
test_u1 = curr_u1[:]
test_u2 = curr_u2[:]
test_u1.append(s_b[idx])
test_u2.append(s_w[idx])
score = ((np.sum(test_u1)/np.sum(test_u2)) - prev_score)
if score > 0.001:
my_feats.append(idx)
curr_u1.append(s_b[idx])
curr_u2.append(s_w[idx])
prev_score = np.sum(curr_u1)/np.sum(curr_u2)
return list(my_feats)
def evaluate_feats2(X, alpha=0.05, highest_best=True):
"""
    X is the raw scores
alpha is the level of significance
This version uses T-test
Returns: set of indices indicating selected features.
"""
eval_order = np.argsort(X)
if highest_best:
eval_order = eval_order[::-1]
selected_feats = []
selected_idx = []
for idx in eval_order:
if len(selected_feats) == 0:
selected_feats.append(X[idx])
selected_idx.append(idx)
continue
# now continue on and decide what to do
mu = np.mean(selected_feats)
sigma = np.std(selected_feats)
U = len(selected_feats)
if sigma == 0.0 and U > 1:
return selected_idx
elif sigma == 0.0:
selected_feats.append(X[idx])
selected_idx.append(idx)
continue
# otherwise compute score for T test.
t_stat = (mu - X[idx])/(sigma/np.sqrt(U))
t_alpha = stats.t.pdf(t_stat, U)
if t_alpha <= alpha:
selected_feats.append(X[idx])
selected_idx.append(idx)
else:
return selected_idx
return selected_idx
def evaluate_feats(s_b, s_w, alpha=0.05):
set1 = evaluate_feats1(s_b,s_w)
eval2 = s_b/s_w
if len(eval2.shape) > 1:
eval2 = np.diag(s_b)/np.diag(s_w)
set2 = evaluate_feats2(eval2, alpha)
return list(set(set1 + set2))
def entropy(X):
mm = MinMaxScaler()
X_mm = mm.fit_transform(X)
Dpq = euclidean_distances(X_mm)
D_bar = np.mean([x for x in np.triu(Dpq).flatten() if x != 0])
alpha = -np.log(0.5)/D_bar
sim_pq = np.exp(-alpha * Dpq)
log_sim_pq = np.log(sim_pq)
entropy = -2*np.sum(np.triu(sim_pq*log_sim_pq + ((1-sim_pq)*np.log((1-sim_pq))), 1))
return entropy
def wilcoxon_group(X, f):
"""
Wilcoxon is a very aggressive selector in an unsupervised sense.
Do we require a supervised group selection? (probably)
Probably one that is score based in order to select the "best" ones
similar to OGFS?
"""
# X is a matrix, f is a single vector
if len(X.shape) == 1:
return wilcoxon(X, f).pvalue
    # now we shall perform the test against each column and return the largest p-value
    return np.max([wilcoxon(x, f).pvalue for x in X.T])
"""
Implement DPP version that is similar to what is done above
sketch of solution
------------------
DPP requires a known number of parameters to check at each partial fit!
"""
class DPPClassifier(SGDClassifier):
def __init__(self, loss="log", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, max_iter=None, tol=None, shuffle=True,
verbose=0, epsilon=0.1, n_jobs=1,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, class_weight=None, warm_start=False,
average=False, n_iter=None,
intragroup_decay = 0.9, pca_alpha=0.05,
intragroup_alpha=0.05, intergroup_thres=None):
super(DPPClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average, n_iter=n_iter)
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
self.seen_cols = []
self.base_shape = None
self.dpp_k = {'pca': 0, 'kpca':0}
self.unseen_only = False
self.intragroup_alpha = intragroup_alpha
self.intergroup_thres = intergroup_thres if intergroup_thres is not None else epsilon
def _dpp_estimate_k(self, L):
"""
L is the input kernel
"""
"""
pca = PCA(n_components=None)
pca.fit(L)
pca_k = np.min(np.argwhere(np.cumsum(pca.explained_variance_ratio_) >
(1-self.intragroup_alpha)))
# also use KernelPCA
kpca = KernelPCA(kernel='rbf')
kpca.fit(L)
kpca_k = np.argwhere(kpca.lambdas_ > 0.01).flatten().shape[0]
self.dpp_k['pca'] = pca_k
self.dpp_k['kpca'] = kpca_k
"""
self.dpp_k['pca'] = None
def add_column_exclusion(self, cols):
self.coef_info['excluded_cols'] = list(self.coef_info['excluded_cols']) + list(cols)
def _fit_columns(self, X_, return_x=True, transform_only=False):
"""
Method filter through "unselected" columns. The goal of this
method is to filter any uninformative columns.
This will be selected based on index only?
If return_x is false, it will only return the boolean mask.
"""
X = X_[X_.columns.difference(self.coef_info['excluded_cols'])]
# order the columns correctly...
col_order = self.coef_info['cols'] + list([x for x in X.columns if x not in self.coef_info['cols']])
X = X[col_order]
return X
def _reg_penalty(self, X):
col_coef = [(col, coef) for col, coef in zip(X.columns.tolist(), self.coef_.flatten()) if np.abs(coef) >= self.intergroup_thres]
self.coef_info['cols'] = [x for x, _ in col_coef]
self.coef_info['coef'] = [x for _, x in col_coef]
self.coef_info['excluded_cols'] = [x for x in self.seen_cols if x not in self.coef_info['cols']]
self.coef_ = np.array(self.coef_info['coef']).reshape(1, -1)
def _dpp_sel(self, X_, y=None):
"""
DPP only relies on X.
We will condition the sampling based on:
* `self.coef_info['cols']`
After sampling it will go ahead and then perform grouped wilcoxon selection.
"""
X = np.array(X_)
print(X.shape)
cols_to_index = [idx for idx, x in enumerate(X_.columns) if x in self.coef_info['cols']]
unseen_cols_to_index = [idx for idx, x in enumerate(X_.columns) if x not in self.coef_info['cols']]
if X.shape[0] < 1000 or X.shape[1] < 100:
#feat_dist = rbf_kernel(X.T)
feat_dist = Nystroem().fit_transform(X.T)
feat_dist = feat_dist.dot(feat_dist.T)
else:
feat_dist = Nystroem().fit_transform(X.T)
feat_dist = feat_dist.dot(feat_dist.T)
#self._dpp_estimate_k(feat_dist)
#k = self.dpp_k['pca'] #- len(self.coef_info['cols'])
k = None
feat_index = []
#while len(feat_index) == 0:
if len(self.coef_info['cols']) == 0:
feat_index = sample_dpp(decompose_kernel(feat_dist), k=k)
else:
feat_index = sample_conditional_dpp(feat_dist, cols_to_index, k=k)
feat_index = [x for x in feat_index if x is not None]
index_to_col = [col for idx, col in enumerate(X_.columns) if idx in feat_index]
self.coef_info['cols'] = list(set(self.coef_info['cols'] + index_to_col))
col_rem = X_.columns.difference(self.coef_info['cols'])
# update column exclusion...
self.coef_info['excluded_cols'] = [x for x in self.coef_info['excluded_cols'] if x not in self.coef_info['cols']]
self.add_column_exclusion(col_rem)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
# TODO: add DPP selection
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
#self._dpp_sel(X, y)
#X = self._fit_columns(X)
super(DPPClassifier, self).fit(X, y, coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
self._reg_penalty(X)
return self
def partial_fit(self, X, y, sample_weight=None):
X_ = X.copy()
unseen_col_size = len([1 for x in X.columns if x not in self.seen_cols])
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
#sample_from_exclude_size = int(len(self.coef_info['excluded_cols']) - (len(self.coef_info['cols'])/2.0))+1
sample_from_exclude_size = int(len(self.coef_info['excluded_cols']) - unseen_col_size)
if sample_from_exclude_size > 0:
cols_excl_sample = random.sample(self.coef_info['excluded_cols'], sample_from_exclude_size)
X = X[X.columns.difference(cols_excl_sample)]
#X = X[X.columns.difference(self.coef_info['excluded_cols'])]
# TODO: add DPP selection
self._dpp_sel(X, y)
X = self._fit_columns(X_)
# now update coefficients
n_samples, n_features = X.shape
coef_list = np.zeros(n_features, dtype=np.float64, order="C")
coef_list[:len(self.coef_info['coef'])] = self.coef_info['coef']
self.coef_ = np.array(coef_list).reshape(1, -1)
super(DPPClassifier, self).partial_fit(X, y, sample_weight=None)
self._reg_penalty(X)
return self
def predict(self, X):
X = self._fit_columns(X, transform_only=True)
return super(DPPClassifier, self).predict(X)
def predict_proba(self, X):
X = self._fit_columns(X, transform_only=True)
return super(DPPClassifier, self).predict_proba(X) | mit | 1,082,041,100,579,539,800 | 35.8 | 136 | 0.569318 | false |
sony/nnabla | python/test/communicator/test_all_reduce.py | 1 | 4846 | # Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla as nn
import nnabla.parametric_functions as PF
import numpy as np
from nnabla.testing import assert_allclose
from six.moves import reduce
def check_comm_nccl_opts(comm_nccl_opts):
if comm_nccl_opts is None:
pytest.skip(
"Communicator test is disabled. You can turn it on by an option `--test-communicator`.")
if len(comm_nccl_opts.devices) < 2:
pytest.skip(
"Communicator test is disabled. Use more than 1 gpus.")
def ref_all_reduce(x_data_list, size, division):
f = reduce(lambda x, y: x + y, np.arange(size)) + size
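    # Added note: each device i holds x_data * (i + 1), so the all-reduced sum is
    # x_data * (1 + 2 + ... + size) = x_data * (sum(range(size)) + size) = x_data * f.
    # For example, with size=2 the factor is (0 + 1) + 2 = 3.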
results = []
for x_data in x_data_list:
result = x_data * f
if division:
result /= size
results.append(result)
return results
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("division", [True, False])
def test_all_reduce(seed, inplace, division, comm_nccl_opts):
check_comm_nccl_opts(comm_nccl_opts)
comm = comm_nccl_opts.comm
device_id = int(comm_nccl_opts.device_id)
n_devices = len(comm_nccl_opts.devices)
# Variables
x_list = []
x_data_list = []
num_layers = 20
rng = np.random.RandomState(seed)
for l in range(num_layers):
x_data = rng.rand(3, 4)
x_data_list.append(x_data)
x = nn.Variable(x_data.shape)
x.d = x_data * (device_id + 1)
x_list.append(x)
# AllReduce
comm.all_reduce([x.data for x in x_list],
division=division, inplace=inplace)
# Ref AllReduce
refs = ref_all_reduce(x_data_list, n_devices, division)
# Check
for x, ref in zip(x_list, refs):
assert_allclose(x.d, ref, rtol=1e-3, atol=1e-6)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("division", [True, False])
def test_all_reduce_skip_by_zero(seed, inplace, division, comm_nccl_opts):
'''
    Check that all_reduce is skipped when an NdArray is flagged as zeroed
    via NdArray.zero().
'''
check_comm_nccl_opts(comm_nccl_opts)
comm = comm_nccl_opts.comm
device_id = int(comm_nccl_opts.device_id)
n_devices = len(comm_nccl_opts.devices)
xs = [nn.Variable((2, 3, 4), need_grad=True),
nn.Variable((2, 3), need_grad=True)]
# Fill data as 1
for x in xs:
x.data.fill(1)
def get_grads(aa):
return [a.grad for a in aa]
def zero_grads(aa):
for a in aa:
a.grad.zero()
# A. Allreduce is not performed as all arrays are not updated.
zero_grads(xs)
comm.all_reduce(get_grads(xs), division=division, inplace=inplace)
for g in get_grads(xs):
assert g.zeroing
# B. All reduce is performed as any of arrays is updated.
zero_grads(xs)
# modify the grad values in rank 0
if comm.rank == 0:
for g in get_grads(xs):
g.data = 1
comm.all_reduce(get_grads(xs), division=division, inplace=inplace)
for g in get_grads(xs):
assert not g.zeroing
# Construct a graph for allreduce during backward
import nnabla.functions as F
y = sum([F.sum(F.relu(x)) for x in xs])
def execute_allreduce_during_backward(performed):
y.forward(clear_no_need_grad=True)
comm_callback = comm.all_reduce_callback(
get_grads(xs), 1024 * 1024 * 2, division=division)
zero_grads(xs)
y.backward(
None, clear_buffer=True,
communicator_callbacks=comm_callback
)
for g in get_grads(xs):
assert g.zeroing != performed
# C-1. performing allreduce during backward
execute_allreduce_during_backward(True)
# C-2. not performing allreduce during backward
for x in xs:
x.need_grad = False
execute_allreduce_during_backward(False)
# C-3. performing allreduce during backward
# NOTE: It's not supported because callbacks over devices are not
    # consistently called due to skipping backward operation on
# variables not requiring gradients.
# if comm.rank == 0:
# for x in xs:
# x.need_grad = True
# execute_allreduce_during_backward(True)
| apache-2.0 | 5,594,287,679,990,989,000 | 30.673203 | 100 | 0.645894 | false |
ym2050/pythonpractice | CF_SVD++_K-means/refine/utils.py | 1 | 4130 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
# Imports features of a future Python version into the current one, so some newer-version features can be used now; must be placed at the top of the file.
'''
Utility classes and helper functions
'''
# from builtins import *
# Imports the builtins module, which provides common functions; Python loads these
# built-ins (e.g. str(), min(), max()) before running any user code, so this import is not required.
import time
from os.path import isfile
from sklearn.externals import joblib
# Joblib provides a set of tools for lightweight pipelining of Python functions,
# including transparent disk caching, fast serialization, simple parallel execution and logging.
# Timer class: measures the time taken by the operations it wraps and prints the result.
class Timer:
    # def a(): defines a function named a
    # __init__ is the initializer, similar to a constructor
def __init__(self, progress=100000,msg=''):
self.starttime = None
self.progress = progress
self.i = 0
self.msg = msg
    # __enter__ is called when entering the with-block
def __enter__(self):
return self.start()
    # __exit__ is called after leaving the with-block (even if an exception was raised inside it)
def __exit__(self, *args):
self.stop()
    # Start timing and print a start message
def start(self):
self.starttime = time.clock()
        # format(self.msg) formats the output, substituting self.msg where the braces are
print("{}: Started.".format(self.msg))
return self
    # Stop timing and print a finish message
def stop(self):
interval = time.clock() - self.starttime
print("{}: Finished in {:.3f} secs.".format(self.msg,interval))
    # Increment the step counter and report progress periodically
def increment(self):
self.i += 1
if self.i % self.progress == 0:
interval = time.clock() - self.starttime
print("{}: {} step has been made in {:.3f} secs.".format(self.msg,self.i, interval))
def diskcache(fname, recache=False):
'''
    Caching decorator; no need to study it closely. Behaviour:
    If the cache file fname exists, load it and return its contents; otherwise run the
    function, write its result to the cache file and return it.
    Suitable for functions that run a specific pipeline and return a result.
    :param fname: cache file name
    :param recache: whether to regenerate the cache
'''
def wrapper(F):
def docache(*args, **kwargs):
if isfile(fname) and not recache:
                # Read the cached result with joblib.load and return it
return joblib.load(fname)
else:
                # Run the function, write the result to the cache file with joblib.dump and return it
result = F(*args, **kwargs)
joblib.dump(result, fname)
return result
return docache
return wrapper
# Tests for the class and functions above; nothing important.
def __timertest():
with Timer(100,msg="COUNTING") as t:
for i in range(10000):
t.increment()
def __diskcachetest():
import numpy as np
@diskcache('../tmp/computetest.cache')
def compute():
print("Compute Run")
return np.arange(100000)
result1 = compute()
result2 = compute()
print(np.array_equal(result1,result2))
def writeToFile(file, content):
with open(file, 'wt+') as fd:
fd.write(content)
fd.close()
def writeMatrix(file, M):
content = ''
for i in range(len(M)):
content = content + '{}: '.format(i+1) + str(M[i]) + '\n'
writeToFile(file, content)
def writeRates(file, uid, rate):
content = '{}: '.format(uid) + str(rate) + '\n'
writeToFile(file, content)
if __name__ == '__main__':
# __timertest()
# __diskcachetest()
# data = ['1', '2', '3']
data = [[1, 2, 3, 4, 7],[2, 4, 5, 6]]
rate = [2, 4, 5, 6]
# writeMatrix('e.txt', data)
writeRates('rate.txt', 1009, rate)
| gpl-3.0 | -5,676,998,464,668,595,000 | 25.495798 | 96 | 0.564792 | false |
eeshangarg/zulip | zerver/lib/email_notifications.py | 1 | 28170 | # See https://zulip.readthedocs.io/en/latest/subsystems/notifications.html
import re
from collections import defaultdict
from datetime import timedelta
from email.headerregistry import Address
from typing import Any, Dict, Iterable, List, Optional, Tuple
import html2text
import lxml.html
import pytz
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.auth import get_backends
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import override as override_language
from lxml.cssselect import CSSSelector
from confirmation.models import one_click_unsubscribe_link
from zerver.decorator import statsd_increment
from zerver.lib.markdown.fenced_code import FENCE_RE
from zerver.lib.message import bulk_access_messages
from zerver.lib.queue import queue_json_publish
from zerver.lib.send_email import FromAddress, send_future_email
from zerver.lib.types import DisplayRecipientT
from zerver.lib.url_encoding import (
huddle_narrow_url,
personal_narrow_url,
stream_narrow_url,
topic_narrow_url,
)
from zerver.models import (
Message,
Recipient,
Stream,
UserMessage,
UserProfile,
get_context_for_message,
get_display_recipient,
get_user_profile_by_id,
receives_offline_email_notifications,
)
def relative_to_full_url(base_url: str, content: str) -> str:
# Convert relative URLs to absolute URLs.
fragment = lxml.html.fromstring(content)
# We handle narrow URLs separately because of two reasons:
# 1: 'lxml' seems to be having an issue in dealing with URLs that begin
# `#` due to which it doesn't add a `/` before joining the base_url to
# the relative URL.
# 2: We also need to update the title attribute in the narrow links which
# is not possible with `make_links_absolute()`.
for link_info in fragment.iterlinks():
elem, attrib, link, pos = link_info
match = re.match("/?#narrow/", link)
if match is not None:
link = re.sub(r"^/?#narrow/", base_url + "/#narrow/", link)
elem.set(attrib, link)
# Only manually linked narrow URLs have title attribute set.
if elem.get("title") is not None:
elem.set("title", link)
# Inline images can't be displayed in the emails as the request
# from the mail server can't be authenticated because it has no
# user_profile object linked to it. So we scrub the inline image
# container.
inline_image_containers = fragment.find_class("message_inline_image")
for container in inline_image_containers:
container.drop_tree()
# The previous block handles most inline images, but for messages
# where the entire Markdown input was just the URL of an image
# (i.e. the entire body is a message_inline_image object), the
# entire message body will be that image element; here, we need a
# more drastic edit to the content.
if fragment.get("class") == "message_inline_image":
image_link = fragment.find("a").get("href")
image_title = fragment.find("a").get("title")
fragment = lxml.html.Element("p")
a = lxml.html.Element("a")
a.set("href", image_link)
a.set("target", "_blank")
a.set("title", image_title)
a.text = image_link
fragment.append(a)
fragment.make_links_absolute(base_url)
content = lxml.html.tostring(fragment, encoding="unicode")
return content
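# Illustrative example (added, not part of the original module): a minimal sketch of how
# relative_to_full_url rewrites narrow links and upload paths; the sample HTML and base
# URL below are made-up values for demonstration only.
def _relative_to_full_url_example() -> str:
    sample = (
        '<p><a href="#narrow/stream/1-general">#general</a>'
        ' <a href="/user_uploads/img.png">img.png</a></p>'
    )
    # Both hrefs become absolute, e.g. https://chat.example.com/#narrow/stream/1-general
    # and https://chat.example.com/user_uploads/img.png.
    return relative_to_full_url("https://chat.example.com", sample)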
def fix_emojis(content: str, base_url: str, emojiset: str) -> str:
def make_emoji_img_elem(emoji_span_elem: CSSSelector) -> Dict[str, Any]:
# Convert the emoji spans to img tags.
classes = emoji_span_elem.get("class")
match = re.search(r"emoji-(?P<emoji_code>\S+)", classes)
# re.search is capable of returning None,
# but since the parent function should only be called with a valid css element
# we assert that it does not.
assert match is not None
emoji_code = match.group("emoji_code")
emoji_name = emoji_span_elem.get("title")
alt_code = emoji_span_elem.text
image_url = base_url + f"/static/generated/emoji/images-{emojiset}-64/{emoji_code}.png"
img_elem = lxml.html.fromstring(
f'<img alt="{alt_code}" src="{image_url}" title="{emoji_name}">'
)
img_elem.set("style", "height: 20px;")
img_elem.tail = emoji_span_elem.tail
return img_elem
fragment = lxml.html.fromstring(content)
for elem in fragment.cssselect("span.emoji"):
parent = elem.getparent()
img_elem = make_emoji_img_elem(elem)
parent.replace(elem, img_elem)
for realm_emoji in fragment.cssselect(".emoji"):
del realm_emoji.attrib["class"]
realm_emoji.set("style", "height: 20px;")
content = lxml.html.tostring(fragment, encoding="unicode")
return content
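# Illustrative example (added, not part of the original module): fix_emojis replaces rendered
# emoji <span> elements with <img> tags pointing at the static emoji sprites. The span markup
# and emoji code below are assumptions chosen to match the regex above.
def _fix_emojis_example() -> str:
    sample = '<p><span class="emoji emoji-1f44d" title="thumbs up">:thumbs_up:</span></p>'
    # The span becomes something like:
    #   <img alt=":thumbs_up:" title="thumbs up" style="height: 20px;"
    #        src="https://chat.example.com/static/generated/emoji/images-google-64/1f44d.png">
    return fix_emojis(sample, "https://chat.example.com", "google")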
def fix_spoilers_in_html(content: str, language: str) -> str:
with override_language(language):
spoiler_title: str = _("Open Zulip to see the spoiler content")
fragment = lxml.html.fromstring(content)
spoilers = fragment.find_class("spoiler-block")
for spoiler in spoilers:
header = spoiler.find_class("spoiler-header")[0]
spoiler_content = spoiler.find_class("spoiler-content")[0]
header_content = header.find("p")
if header_content is None:
            # Create a new element to append the spoiler to
header_content = lxml.html.fromstring("<p></p>")
header.append(header_content)
else:
            # Add a space. It's simpler to append a new span element than
            # to insert text after the last node ends, since neither .text
            # nor .tail does the right thing for us.
header_content.append(lxml.html.fromstring("<span> </span>"))
span_elem = lxml.html.fromstring(
            f'<span class="spoiler-title" title="{spoiler_title}">({spoiler_title})</span>'
)
header_content.append(span_elem)
header.drop_tag()
spoiler_content.drop_tree()
content = lxml.html.tostring(fragment, encoding="unicode")
return content
def fix_spoilers_in_text(content: str, language: str) -> str:
with override_language(language):
spoiler_title: str = _("Open Zulip to see the spoiler content")
lines = content.split("\n")
output = []
open_fence = None
for line in lines:
m = FENCE_RE.match(line)
if m:
fence = m.group("fence")
lang = m.group("lang")
if lang == "spoiler":
open_fence = fence
output.append(line)
output.append(f"({spoiler_title})")
elif fence == open_fence:
open_fence = None
output.append(line)
elif not open_fence:
output.append(line)
return "\n".join(output)
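# Illustrative example (added, not part of the original module): fix_spoilers_in_text keeps
# the spoiler fences but drops the hidden lines and inserts the localized placeholder. The
# exact fence syntax accepted by FENCE_RE is an assumption here.
def _fix_spoilers_in_text_example() -> str:
    sample = "```spoiler The answer\n42\n```"
    # Expected shape of the result (for an English locale):
    # ```spoiler The answer
    # (Open Zulip to see the spoiler content)
    # ```
    return fix_spoilers_in_text(sample, "en")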
def build_message_list(
user: UserProfile,
messages: List[Message],
stream_map: Dict[int, Stream], # only needs id, name
) -> List[Dict[str, Any]]:
"""
Builds the message list object for the message notification email template.
The messages are collapsed into per-recipient and per-sender blocks, like
our web interface
"""
messages_to_render: List[Dict[str, Any]] = []
def sender_string(message: Message) -> str:
if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
return message.sender.full_name
else:
return ""
def fix_plaintext_image_urls(content: str) -> str:
# Replace image URLs in plaintext content of the form
# [image name](image url)
# with a simple hyperlink.
return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)
def append_sender_to_message(
message_plain: str, message_html: str, sender: str
) -> Tuple[str, str]:
message_plain = f"{sender}: {message_plain}"
message_soup = BeautifulSoup(message_html, "html.parser")
sender_name_soup = BeautifulSoup(f"<b>{sender}</b>: ", "html.parser")
first_tag = message_soup.find()
if first_tag.name == "p":
first_tag.insert(0, sender_name_soup)
else:
message_soup.insert(0, sender_name_soup)
return message_plain, str(message_soup)
def build_message_payload(message: Message, sender: Optional[str] = None) -> Dict[str, str]:
plain = message.content
plain = fix_plaintext_image_urls(plain)
# There's a small chance of colliding with non-Zulip URLs containing
# "/user_uploads/", but we don't have much information about the
# structure of the URL to leverage. We can't use `relative_to_full_url()`
# function here because it uses a stricter regex which will not work for
# plain text.
plain = re.sub(r"/user_uploads/(\S*)", user.realm.uri + r"/user_uploads/\1", plain)
plain = fix_spoilers_in_text(plain, user.default_language)
assert message.rendered_content is not None
html = message.rendered_content
html = relative_to_full_url(user.realm.uri, html)
html = fix_emojis(html, user.realm.uri, user.emojiset)
html = fix_spoilers_in_html(html, user.default_language)
if sender:
plain, html = append_sender_to_message(plain, html, sender)
return {"plain": plain, "html": html}
def build_sender_payload(message: Message) -> Dict[str, Any]:
sender = sender_string(message)
return {"sender": sender, "content": [build_message_payload(message, sender)]}
def message_header(message: Message) -> Dict[str, Any]:
if message.recipient.type == Recipient.PERSONAL:
narrow_link = get_narrow_url(user, message)
header = f"You and {message.sender.full_name}"
header_html = f"<a style='color: #ffffff;' href='{narrow_link}'>{header}</a>"
elif message.recipient.type == Recipient.HUDDLE:
display_recipient = get_display_recipient(message.recipient)
assert not isinstance(display_recipient, str)
narrow_link = get_narrow_url(user, message, display_recipient=display_recipient)
other_recipients = [r["full_name"] for r in display_recipient if r["id"] != user.id]
header = "You and {}".format(", ".join(other_recipients))
header_html = f"<a style='color: #ffffff;' href='{narrow_link}'>{header}</a>"
else:
stream_id = message.recipient.type_id
stream = stream_map.get(stream_id, None)
if stream is None:
# Some of our callers don't populate stream_map, so
# we just populate the stream from the database.
stream = Stream.objects.only("id", "name").get(id=stream_id)
narrow_link = get_narrow_url(user, message, stream=stream)
header = f"{stream.name} > {message.topic_name()}"
stream_link = stream_narrow_url(user.realm, stream)
header_html = f"<a href='{stream_link}'>{stream.name}</a> > <a href='{narrow_link}'>{message.topic_name()}</a>"
return {
"plain": header,
"html": header_html,
"stream_message": message.recipient.type_name() == "stream",
}
# # Collapse message list to
# [
# {
# "header": {
# "plain":"header",
# "html":"htmlheader"
# }
# "senders":[
# {
# "sender":"sender_name",
# "content":[
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# ]
# }
# ]
# },
# ]
messages.sort(key=lambda message: message.date_sent)
for message in messages:
header = message_header(message)
# If we want to collapse into the previous recipient block
if len(messages_to_render) > 0 and messages_to_render[-1]["header"] == header:
sender = sender_string(message)
sender_block = messages_to_render[-1]["senders"]
# Same message sender, collapse again
if sender_block[-1]["sender"] == sender:
sender_block[-1]["content"].append(build_message_payload(message))
else:
# Start a new sender block
sender_block.append(build_sender_payload(message))
else:
# New recipient and sender block
recipient_block = {"header": header, "senders": [build_sender_payload(message)]}
messages_to_render.append(recipient_block)
return messages_to_render
def get_narrow_url(
user_profile: UserProfile,
message: Message,
display_recipient: Optional[DisplayRecipientT] = None,
stream: Optional[Stream] = None,
) -> str:
"""The display_recipient and stream arguments are optional. If not
provided, we'll compute them from the message; they exist as a
performance optimization for cases where the caller needs those
data too.
"""
if message.recipient.type == Recipient.PERSONAL:
assert stream is None
assert display_recipient is None
return personal_narrow_url(
realm=user_profile.realm,
sender=message.sender,
)
elif message.recipient.type == Recipient.HUDDLE:
assert stream is None
if display_recipient is None:
display_recipient = get_display_recipient(message.recipient)
assert display_recipient is not None
assert not isinstance(display_recipient, str)
other_user_ids = [r["id"] for r in display_recipient if r["id"] != user_profile.id]
return huddle_narrow_url(
realm=user_profile.realm,
other_user_ids=other_user_ids,
)
else:
assert display_recipient is None
if stream is None:
stream = Stream.objects.only("id", "name").get(id=message.recipient.type_id)
return topic_narrow_url(user_profile.realm, stream, message.topic_name())
def message_content_allowed_in_missedmessage_emails(user_profile: UserProfile) -> bool:
return (
user_profile.realm.message_content_allowed_in_email_notifications
and user_profile.message_content_in_email_notifications
)
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(
user_profile: UserProfile, missed_messages: List[Dict[str, Any]], message_count: int
) -> None:
"""
Send a reminder email to a user if she's missed some PMs by being offline.
The email will have its reply to address set to a limited used email
address that will send a Zulip message to the correct recipient. This
allows the user to respond to missed PMs, huddles, and @-mentions directly
from the email.
`user_profile` is the user to send the reminder to
`missed_messages` is a list of dictionaries to Message objects and other data
for a group of messages that share a recipient (and topic)
"""
from zerver.context_processors import common_context
recipients = {
(msg["message"].recipient_id, msg["message"].topic_name()) for msg in missed_messages
}
if len(recipients) != 1:
raise ValueError(
f"All missed_messages must have the same recipient and topic {recipients!r}",
)
# This link is no longer a part of the email, but keeping the code in case
# we find a clean way to add it back in the future
unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
context = common_context(user_profile)
context.update(
name=user_profile.full_name,
message_count=message_count,
unsubscribe_link=unsubscribe_link,
realm_name_in_notifications=user_profile.realm_name_in_notifications,
)
triggers = [message["trigger"] for message in missed_messages]
unique_triggers = set(triggers)
context.update(
mention="mentioned" in unique_triggers or "wildcard_mentioned" in unique_triggers,
stream_email_notify="stream_email_notify" in unique_triggers,
mention_count=triggers.count("mentioned") + triggers.count("wildcard_mentioned"),
)
# If this setting (email mirroring integration) is enabled, only then
# can users reply to email to send message to Zulip. Thus, one must
# ensure to display warning in the template.
if settings.EMAIL_GATEWAY_PATTERN:
context.update(
reply_to_zulip=True,
)
else:
context.update(
reply_to_zulip=False,
)
from zerver.lib.email_mirror import create_missed_message_address
reply_to_address = create_missed_message_address(user_profile, missed_messages[0]["message"])
if reply_to_address == FromAddress.NOREPLY:
reply_to_name = ""
else:
reply_to_name = "Zulip"
narrow_url = get_narrow_url(user_profile, missed_messages[0]["message"])
context.update(
narrow_url=narrow_url,
)
senders = list({m["message"].sender for m in missed_messages})
if missed_messages[0]["message"].recipient.type == Recipient.HUDDLE:
display_recipient = get_display_recipient(missed_messages[0]["message"].recipient)
# Make sure that this is a list of strings, not a string.
assert not isinstance(display_recipient, str)
other_recipients = [r["full_name"] for r in display_recipient if r["id"] != user_profile.id]
context.update(group_pm=True)
if len(other_recipients) == 2:
huddle_display_name = " and ".join(other_recipients)
context.update(huddle_display_name=huddle_display_name)
elif len(other_recipients) == 3:
huddle_display_name = (
f"{other_recipients[0]}, {other_recipients[1]}, and {other_recipients[2]}"
)
context.update(huddle_display_name=huddle_display_name)
else:
huddle_display_name = "{}, and {} others".format(
", ".join(other_recipients[:2]), len(other_recipients) - 2
)
context.update(huddle_display_name=huddle_display_name)
elif missed_messages[0]["message"].recipient.type == Recipient.PERSONAL:
context.update(private_message=True)
elif context["mention"] or context["stream_email_notify"]:
# Keep only the senders who actually mentioned the user
if context["mention"]:
senders = list(
{
m["message"].sender
for m in missed_messages
if m["trigger"] == "mentioned" or m["trigger"] == "wildcard_mentioned"
}
)
message = missed_messages[0]["message"]
stream = Stream.objects.only("id", "name").get(id=message.recipient.type_id)
stream_header = f"{stream.name} > {message.topic_name()}"
context.update(
stream_header=stream_header,
)
else:
raise AssertionError("Invalid messages!")
# If message content is disabled, then flush all information we pass to email.
if not message_content_allowed_in_missedmessage_emails(user_profile):
realm = user_profile.realm
context.update(
reply_to_zulip=False,
messages=[],
sender_str="",
realm_str=realm.name,
huddle_display_name="",
show_message_content=False,
message_content_disabled_by_user=not user_profile.message_content_in_email_notifications,
message_content_disabled_by_realm=not realm.message_content_allowed_in_email_notifications,
)
else:
context.update(
messages=build_message_list(
user=user_profile,
messages=[m["message"] for m in missed_messages],
stream_map={},
),
sender_str=", ".join(sender.full_name for sender in senders),
realm_str=user_profile.realm.name,
show_message_content=True,
)
with override_language(user_profile.default_language):
from_name: str = _("Zulip notifications")
from_address = FromAddress.NOREPLY
if len(senders) == 1 and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER:
# If this setting is enabled, you can reply to the Zulip
# message notification emails directly back to the original sender.
# However, one must ensure the Zulip server is in the SPF
# record for the domain, or there will be spam/deliverability
# problems.
#
# Also, this setting is not really compatible with
# EMAIL_ADDRESS_VISIBILITY_ADMINS.
sender = senders[0]
from_name, from_address = (sender.full_name, sender.email)
context.update(
reply_to_zulip=False,
)
email_dict = {
"template_prefix": "zerver/emails/missed_message",
"to_user_ids": [user_profile.id],
"from_name": from_name,
"from_address": from_address,
"reply_to_email": str(Address(display_name=reply_to_name, addr_spec=reply_to_address)),
"context": context,
}
queue_json_publish("email_senders", email_dict)
user_profile.last_reminder = timezone_now()
user_profile.save(update_fields=["last_reminder"])
def handle_missedmessage_emails(
user_profile_id: int, missed_email_events: Iterable[Dict[str, Any]]
) -> None:
message_ids = {event.get("message_id"): event.get("trigger") for event in missed_email_events}
user_profile = get_user_profile_by_id(user_profile_id)
if not receives_offline_email_notifications(user_profile):
return
# Note: This query structure automatically filters out any
# messages that were permanently deleted, since those would now be
# in the ArchivedMessage table, not the Message table.
messages = Message.objects.filter(
usermessage__user_profile_id=user_profile,
id__in=message_ids,
usermessage__flags=~UserMessage.flags.read,
)
# Cancel missed-message emails for deleted messages
messages = [um for um in messages if um.content != "(deleted)"]
if not messages:
return
# We bucket messages by tuples that identify similar messages.
# For streams it's recipient_id and topic.
# For PMs it's recipient id and sender.
messages_by_bucket: Dict[Tuple[int, str], List[Message]] = defaultdict(list)
for msg in messages:
if msg.recipient.type == Recipient.PERSONAL:
# For PM's group using (recipient, sender).
messages_by_bucket[(msg.recipient_id, msg.sender_id)].append(msg)
else:
messages_by_bucket[(msg.recipient_id, msg.topic_name())].append(msg)
message_count_by_bucket = {
bucket_tup: len(msgs) for bucket_tup, msgs in messages_by_bucket.items()
}
for msg_list in messages_by_bucket.values():
msg = min(msg_list, key=lambda msg: msg.date_sent)
if msg.is_stream_message():
context_messages = get_context_for_message(msg)
filtered_context_messages = bulk_access_messages(user_profile, context_messages)
msg_list.extend(filtered_context_messages)
# Sort emails by least recently-active discussion.
bucket_tups: List[Tuple[Tuple[int, str], int]] = []
for bucket_tup, msg_list in messages_by_bucket.items():
max_message_id = max(msg_list, key=lambda msg: msg.id).id
bucket_tups.append((bucket_tup, max_message_id))
bucket_tups = sorted(bucket_tups, key=lambda x: x[1])
# Send an email per bucket.
for bucket_tup, ignored_max_id in bucket_tups:
unique_messages = {}
for m in messages_by_bucket[bucket_tup]:
unique_messages[m.id] = dict(
message=m,
trigger=message_ids.get(m.id),
)
do_send_missedmessage_events_reply_in_zulip(
user_profile,
list(unique_messages.values()),
message_count_by_bucket[bucket_tup],
)
def followup_day2_email_delay(user: UserProfile) -> timedelta:
days_to_delay = 2
user_tz = user.timezone
if user_tz == "":
user_tz = "UTC"
signup_day = user.date_joined.astimezone(pytz.timezone(user_tz)).isoweekday()
if signup_day == 5:
# If the day is Friday then delay should be till Monday
days_to_delay = 3
elif signup_day == 4:
# If the day is Thursday then delay should be till Friday
days_to_delay = 1
# The delay should be 1 hour before the above calculated delay as
# our goal is to maximize the chance that this email is near the top
# of the user's inbox when the user sits down to deal with their inbox,
# or comes in while they are dealing with their inbox.
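    # Added worked example: a user who joined on a Friday (isoweekday() == 5) gets
    # days_to_delay == 3, so the return value is timedelta(days=3, hours=-1),
    # i.e. 71 hours after signup, landing on Monday about an hour before the signup time.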
return timedelta(days=days_to_delay, hours=-1)
def enqueue_welcome_emails(user: UserProfile, realm_creation: bool = False) -> None:
from zerver.context_processors import common_context
if settings.WELCOME_EMAIL_SENDER is not None:
# line break to avoid triggering lint rule
from_name = settings.WELCOME_EMAIL_SENDER["name"]
from_address = settings.WELCOME_EMAIL_SENDER["email"]
else:
from_name = None
from_address = FromAddress.support_placeholder
other_account_count = (
UserProfile.objects.filter(delivery_email__iexact=user.delivery_email)
.exclude(id=user.id)
.count()
)
unsubscribe_link = one_click_unsubscribe_link(user, "welcome")
context = common_context(user)
context.update(
unsubscribe_link=unsubscribe_link,
keyboard_shortcuts_link=user.realm.uri + "/help/keyboard-shortcuts",
realm_name=user.realm.name,
realm_creation=realm_creation,
email=user.delivery_email,
is_realm_admin=user.is_realm_admin,
)
if user.is_realm_admin:
context["getting_started_link"] = (
user.realm.uri + "/help/getting-your-organization-started-with-zulip"
)
else:
context["getting_started_link"] = "https://zulip.com"
# Imported here to avoid import cycles.
from zproject.backends import ZulipLDAPAuthBackend, email_belongs_to_ldap
if email_belongs_to_ldap(user.realm, user.delivery_email):
context["ldap"] = True
for backend in get_backends():
            # If the user is doing authentication via LDAP, note that
# we exclude ZulipLDAPUserPopulator here, since that
# isn't used for authentication.
if isinstance(backend, ZulipLDAPAuthBackend):
context["ldap_username"] = backend.django_to_ldap_username(user.delivery_email)
break
send_future_email(
"zerver/emails/followup_day1",
user.realm,
to_user_ids=[user.id],
from_name=from_name,
from_address=from_address,
context=context,
)
if other_account_count == 0:
send_future_email(
"zerver/emails/followup_day2",
user.realm,
to_user_ids=[user.id],
from_name=from_name,
from_address=from_address,
context=context,
delay=followup_day2_email_delay(user),
)
def convert_html_to_markdown(html: str) -> str:
parser = html2text.HTML2Text()
markdown = parser.handle(html).strip()
# We want images to get linked and inline previewed, but html2text will turn
    # them into links of the form `![](http://foo.com/image.png?key)`, which is
    # ugly. Run a regex over the resulting description, turning links of the
    # form `![](http://foo.com/image.png?key)` into
# `[image.png](http://foo.com/image.png)`.
return re.sub("!\\[\\]\\((\\S*)/(\\S*)\\?(\\S*)\\)", "[\\2](\\1/\\2)", markdown)
| apache-2.0 | -4,329,368,778,265,539,600 | 39.416069 | 123 | 0.625204 | false |
Guiiix/ip150_interfacer | classes/Interfacer.py | 1 | 5394 | from classes.Parser import Parser
from classes.Paracrypt import Paracrypt
from threading import Thread
import urllib2
import time
import random
class Interfacer:
def __init__(self, ip, port, username, password, verbose_level, zones_identifier,
area_identifier, ses_identifier, status_identifier, states_identifier, keep_alive_allowed_errors):
self.ip = ip
self.port = str(port)
self.username = str(username)
self.password = str(password)
self.connected = False
self.verbose_level = verbose_level
self.parser = Parser(self, zones_identifier, area_identifier, ses_identifier, status_identifier, states_identifier)
self.current_status = "Init phase"
self.paracrypt = Paracrypt(username, password)
self.running = False
self.keep_alive_allowed_errors = keep_alive_allowed_errors;
self.keep_alive_errors = 0
### Main method ###
def run(self, login_max_try, ready_wait_time, update_time_interval):
th = Thread(target=self.keep_alive)
if not self.loop_login(login_max_try, ready_wait_time):
return False
self.connected = True
equipment = self.get_equipment()
if not equipment:
return False
print equipment
self.zones = equipment[0]
self.areas = equipment[1]
self.update_status()
self.running = True
self.current_status = "Running"
th.start()
while self.running and self.connected:
self.update_status()
time.sleep(update_time_interval)
running = False
th.join()
if self.connected:
self.logout()
### These methods provide some usefull features to help ###
def display_message(self, msg, verbose_level):
if verbose_level <= self.verbose_level:
print '\033[94m' + "* <INTERFACER> : " + msg + '\033[0m'
def raise_error(self, msg):
print '\033[94m' + "* <INTERFACER> : /!\ " + msg + '\033[0m'
self.current_status = msg
def do_request(self, location):
try:
html = urllib2.urlopen("http://" + self.ip + ":" + self.port + "/" + location, timeout=1).read()
self.display_message("Making request to /" + location, 2)
return html
except Exception:
self.raise_error('Unable to make request to /' + location)
return False
### Login/logout methods ###
def loop_login(self, login_max_try, ready_wait_time):
# Trying to connect
retry = True
i = 0
while retry:
if self.login():
retry = False
else:
i += 1
if (i == login_max_try):
return False
# Waiting for server to be ready
while not self.do_request("index.html"):
            self.raise_error("Not yet ready...")
time.sleep(ready_wait_time)
self.display_message("Seems to be ready", 1)
return True
def login(self):
html = self.do_request("login_page.html")
if not html:
return False
js = self.parser.js_from_html(html)
if not js:
return False
self.display_message("Looking for someone connected...", 1)
if self.parser.someone_connected(js):
self.raise_error('Unable to login : someone is already connected')
time.sleep(30)
return False
ses = self.parser.parse_ses(js)
        if ses == False:
            self.raise_error('Unable to login : No SES value found')
            return False
self.display_message('SES Value found, encrypting credentials...', 2)
credentials = self.paracrypt.login_encrypt(ses)
self.display_message('Sending auth request...', 2)
html = self.do_request("default.html?u=" + str(credentials['user']) + "&p=" + str(credentials['password']))
if not html:
return False
return True
def logout(self):
self.connected = False
return self.do_request("logout.html")
### Status/equipment methods ###
def get_status(self):
html = self.do_request("statuslive.html")
if not html:
return False
js = self.parser.js_from_html(html)
if not js:
return False
return self.parser.parse_status(js)
def get_equipment(self):
html = self.do_request("index.html")
if not html:
return False
js = self.parser.js_from_html(html)
if not js:
return False
return self.parser.parse_equipment(js)
def update_status(self):
status = self.get_status()
if not status:
return False
states = status[1]
status = status[0]
if len(status) == len(self.zones):
for i in range(0, len(status)):
self.zones[i]["status"] = status[i]
else:
self.raise_error("status (" + str(len(status)) + ") != zones (" + str(len(self.zones)) + " )...")
return False
if len(states) == len(self.areas):
for i in range(0, len(states)):
self.areas[i]["armed"] = states[i]
else:
            self.raise_error("Erf, states (" + str(len(states)) + ") != areas (" + str(len(self.areas)) + " )...")
return False
return True
### Stay connected ###
def keep_alive(self):
while (self.running):
#generate random id
rand = random.randint(1000000000000000,9999999999999999)
html = self.do_request("keep_alive.html?msgid=1&" + str(rand))
if not html:
self.keep_alive_errors += 1
if self.keep_alive_allowed_errors == self.keep_alive_errors:
self.raise_error("Keep alive errors exceeded")
self.running = False
return False
else:
if "javascript" in html:
self.raise_error("Connection lost")
self.running = False
self.connected = False
return False
time.sleep(2.5)
### Commands methods ###
def arm(self):
self.do_request("statuslive.html?area=00&value=r")
def desarm(self):
self.do_request("statuslive.html?area=00&value=d")
def partiel(self):
self.do_request("statuslive.html?area=00&value=s") | gpl-3.0 | 2,701,808,814,002,473,000 | 25.975 | 117 | 0.673526 | false |
markgw/jazzparser | lib/nltk/tokenize/api.py | 1 | 2128 | # Natural Language Toolkit: Tokenizer Interface
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Tokenizer Interface
"""
from nltk.internals import overridden
from util import string_span_tokenize
class TokenizerI(object):
"""
A processing interface for I{tokenizing} a string, or dividing it
into a list of substrings.
Subclasses must define:
- either L{tokenize()} or L{batch_tokenize()} (or both)
"""
def tokenize(self, s):
"""
Divide the given string into a list of substrings.
@return: C{list} of C{str}
"""
if overridden(self.batch_tokenize):
return self.batch_tokenize([s])[0]
else:
raise NotImplementedError()
def span_tokenize(self, s):
"""
Identify the tokens using integer offsets (start_i, end_i),
where s[start_i:end_i] is the corresponding token.
@return: C{iter} of C{tuple} of C{int}
"""
raise NotImplementedError()
def batch_tokenize(self, strings):
"""
Apply L{self.tokenize()} to each element of C{strings}. I.e.:
>>> return [self.tokenize(s) for s in strings]
@rtype: C{list} of C{list} of C{str}
"""
return [self.tokenize(s) for s in strings]
def batch_span_tokenize(self, strings):
"""
Apply L{self.span_tokenize()} to each element of C{strings}. I.e.:
>>> return [self.span_tokenize(s) for s in strings]
@rtype: C{iter} of C{list} of C{tuple} of C{int}
"""
for s in strings:
yield list(self.span_tokenize(s))
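# Illustrative sketch (added, not part of the original NLTK module): a minimal concrete
# tokenizer built on this interface. As the class docstring above notes, a subclass only
# has to define tokenize() (or batch_tokenize()); span_tokenize() is left to the default,
# which raises NotImplementedError.
class _ExampleWhitespaceTokenizer(TokenizerI):
    """Toy tokenizer that splits on runs of whitespace."""
    def tokenize(self, s):
        return s.split()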
class StringTokenizer(TokenizerI):
r"""
A tokenizer that divides a string into substrings by splitting
on the specified string (defined in subclasses).
"""
def tokenize(self, s):
return s.split(self._string)
def span_tokenize(self, s):
for span in string_span_tokenize(s, self._string):
yield span
| gpl-3.0 | 3,250,222,746,014,356,000 | 27 | 75 | 0.600094 | false |
koomik/CouchPotatoServer | couchpotato/core/media/_base/providers/torrent/torrentday.py | 1 | 3584 | from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://www.td.af/',
'login': 'http://www.td.af/torrents/',
'login_check': 'http://www.torrentday.com/userdetails.php',
'detail': 'http://www.td.af/details.php?id=%s',
'search': 'http://www.td.af/V3/API/API.php',
'download': 'http://www.td.af/download.php/%s/%s',
}
http_time_between_calls = 1 # Seconds
def _search(self, media, quality, results):
query = self.buildUrl(media)
data = {
'/browse.php?': None,
'cata': 'yes',
'jxt': 8,
'jxw': 'b',
'search': query,
}
data = self.getJsonData(self.urls['search'], data = data)
try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
except: return
for torrent in torrents:
results.append({
'id': torrent['id'],
'name': torrent['name'],
'url': self.urls['download'] % (torrent['id'], torrent['fname']),
'detail_url': self.urls['detail'] % torrent['id'],
'size': self.parseSize(torrent.get('size')),
'seeders': tryInt(torrent.get('seed')),
'leechers': tryInt(torrent.get('leech')),
})
def getLoginParams(self):
return {
'username': self.conf('username'),
'password': self.conf('password'),
'submit.x': 18,
'submit.y': 11,
'submit': 'submit',
}
def loginSuccess(self, output):
return 'Password not correct' not in output
def loginCheckSuccess(self, output):
return 'logout.php' in output.lower()
config = [{
'name': 'torrentday',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentDay',
'description': 'See <a href="http://www.td.af/">TorrentDay</a>',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
| gpl-3.0 | -641,086,129,201,212,200 | 30.716814 | 99 | 0.429129 | false |
bitmovin/bitmovin-python | bitmovin/resources/models/encodings/muxings/information/progressive_ts_information.py | 1 | 4987 | from bitmovin.resources import Resource
from bitmovin.resources.models.encodings.muxings.information import ByteRange
from bitmovin.errors import InvalidTypeError
from bitmovin.utils.serialization import Serializable
from .muxing_information_video_track import MuxingInformationVideoTrack
from .muxing_information_audio_track import MuxingInformationAudioTrack
class ProgressiveTSInformation(Resource, Serializable):
def __init__(self, mime_type=None, file_size=None, container_format=None, container_bitrate=None, duration=None,
video_tracks=None, audio_tracks=None, byte_ranges=None):
super().__init__()
self.mime_type = mime_type
self.file_size = file_size
self.container_format = container_format
self.container_bitrate = container_bitrate
self.duration = duration
self._video_tracks = None
self._audio_tracks = None
self._byte_ranges = None
self.video_tracks = video_tracks
self.audio_tracks = audio_tracks
self.byte_ranges = byte_ranges
@classmethod
def parse_from_json_object(cls, json_object):
mime_type = json_object.get('mimeType')
file_size = json_object.get('fileSize')
container_format = json_object.get('containerFormat')
container_bitrate = json_object.get('containerBitrate')
duration = json_object.get('duration')
video_tracks = json_object.get('videoTracks')
audio_tracks = json_object.get('audioTracks')
byte_ranges = json_object.get('byteRanges')
progressive_ts_muxing_information = ProgressiveTSInformation(mime_type=mime_type,
file_size=file_size,
container_format=container_format,
container_bitrate=container_bitrate,
duration=duration,
video_tracks=video_tracks,
audio_tracks=audio_tracks,
byte_ranges=byte_ranges)
return progressive_ts_muxing_information
@property
def audio_tracks(self):
return self._audio_tracks
@audio_tracks.setter
def audio_tracks(self, new_audio_tracks):
if new_audio_tracks is None:
return
if not isinstance(new_audio_tracks, list):
raise InvalidTypeError('new_audio_tracks has to be a list of MuxingInformationAudioTrack objects')
if all(isinstance(audio_track, MuxingInformationAudioTrack) for audio_track in new_audio_tracks):
self._audio_tracks = new_audio_tracks
else:
audio_tracks = []
for json_object in new_audio_tracks:
audio_track = MuxingInformationAudioTrack.parse_from_json_object(json_object)
audio_tracks.append(audio_track)
self._audio_tracks = audio_tracks
@property
def video_tracks(self):
return self._video_tracks
@video_tracks.setter
def video_tracks(self, new_video_tracks):
if new_video_tracks is None:
return
if not isinstance(new_video_tracks, list):
raise InvalidTypeError('new_video_tracks has to be a list of MuxingInformationVideoTrack objects')
if all(isinstance(video_track, MuxingInformationVideoTrack) for video_track in new_video_tracks):
self._video_tracks = new_video_tracks
else:
video_tracks = []
for json_object in new_video_tracks:
video_track = MuxingInformationVideoTrack.parse_from_json_object(json_object)
video_tracks.append(video_track)
self._video_tracks = video_tracks
@property
def byte_ranges(self):
return self._byte_ranges
@byte_ranges.setter
def byte_ranges(self, new_value):
if new_value is None:
return
if not isinstance(new_value, list):
raise InvalidTypeError('byte_ranges has to be a list of ByteRange instances')
if all(isinstance(output, ByteRange) for output in new_value):
byte_ranges = []
for item in new_value:
byte_ranges.append(item)
self._byte_ranges = byte_ranges
else:
byte_ranges = []
for item in new_value:
byte_ranges.append(ByteRange.parse_from_json_object(item))
self._byte_ranges = byte_ranges
def serialize(self):
serialized = super().serialize()
serialized['videoTracks'] = self.video_tracks
serialized['audioTracks'] = self.audio_tracks
serialized['byteRanges'] = self.byte_ranges
return serialized
| unlicense | -5,852,554,402,090,742,000 | 39.544715 | 116 | 0.594947 | false |
bosichong/17python.com | thread/threadlock.py | 1 | 2692 | #codeing=utf-8
# @Time : 2017-10.04
# @Author : J.sky
# @Mail : [email protected]
# @Site : www.17python.com
# @Title : Python multithreaded programming (2): thread locks, threading.Lock
# @Url : http://www.17python.com/blog/33
# @Details : Python multithreaded programming (2): thread locks, threading.Lock
# @Other : OS X 10.11.6
# Python 3.6.1
# VSCode 1.15.1
###################################
# Python multithreaded programming (2): thread locks, threading.Lock
###################################
'''
In a multithreaded task, the order in which threads operate on data is random and cannot be
predicted. If several threads modify the same piece of data, this randomness will compromise the
correctness of the result, so thread locks must be used when writing multithreaded code.
## Python's thread lock: threading.Lock
The example below shows why locks matter: we define two counters and let several threads add and
subtract the same value on each of them. If the operations interleave normally, one addition per
subtraction, the counters should end up unchanged; but without a lock the threads race for the
data, so extra additions or subtractions can occur. See the example below:
'''
import threading
data = 0
lock_data = 0
lock = threading.Lock()  # create a thread lock
lock.acquire()
lock.release()
def change_d(n):
    '''Modify the unlocked data.'''
global data
data += n
data -= n
def change_l_d(n):
    '''Modify the lock-protected data.'''
global lock_data
lock_data += n
lock_data -= n
def myfun(n):
for i in range(500000):
change_d(n)
#lock.acquire()
#change_l_d(n)
#lock.release()
        # same effect as the with statement below
with lock:
change_l_d(n)
def main():
threads = []
k = 5
for i in range(k):
t = threading.Thread(target=myfun, args=(10,))
threads.append(t)
for i in range(k):
threads[i].start()
for i in range(k):
threads[i].join()
    print("Final result of the unlocked data == {0}".format(data))
    print("Final result of the locked data == {0}".format(lock_data))
if __name__ == '__main__':
main()
'''
After running this several times you will see that the final result of the unlocked data varies,
which demonstrates that unlocked multithreaded operations are random.
So whenever multiple threads operate on a single piece of data, a lock must be used to guarantee
that only one thread operates on it at a time.
Besides locking the shared data, `Python` also provides semaphores and events to coordinate
threads, but in the author's opinion the Lock object is the most convenient.
''' | apache-2.0 | 8,619,549,271,112,287,000 | 21.21519 | 96 | 0.607184 | false |
gaetano-guerriero/eyeD3-debian | src/eyed3/utils/console.py | 1 | 18555 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import struct
import sys
import time
from . import formatSize, formatTime
from .. import LOCAL_ENCODING, compat
from .log import log
try:
import fcntl
import termios
import signal
_CAN_RESIZE_TERMINAL = True
except ImportError:
_CAN_RESIZE_TERMINAL = False
class AnsiCodes(object):
_USE_ANSI = False
_CSI = '\033['
def __init__(self, codes):
def code_to_chars(code):
return AnsiCodes._CSI + str(code) + 'm'
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
# Add color function
for reset_name in ("RESET_%s" % name, "RESET"):
if hasattr(codes, reset_name):
reset_value = getattr(codes, reset_name)
setattr(self, "%s" % name.lower(),
AnsiCodes._mkfunc(code_to_chars(value),
code_to_chars(reset_value)))
break
@staticmethod
def _mkfunc(color, reset):
def _cwrap(text, *styles):
if not AnsiCodes._USE_ANSI:
return text
s = u''
for st in styles:
s += st
s += color + text + reset
if styles:
s += Style.RESET_ALL
return s
return _cwrap
def __getattribute__(self, name):
attr = super(AnsiCodes, self).__getattribute__(name)
if (hasattr(attr, "startswith") and
attr.startswith(AnsiCodes._CSI) and
not AnsiCodes._USE_ANSI):
return ""
else:
return attr
def __getitem__(self, name):
return getattr(self, name.upper())
@classmethod
def init(cls, allow_colors):
cls._USE_ANSI = allow_colors and cls._term_supports_color()
@staticmethod
def _term_supports_color():
if (os.environ.get("TERM") == "dumb" or
os.environ.get("OS") == "Windows_NT"):
return False
return hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
class AnsiFore:
GREY = 30 # noqa
RED = 31 # noqa
GREEN = 32 # noqa
YELLOW = 33 # noqa
BLUE = 34 # noqa
MAGENTA = 35 # noqa
CYAN = 36 # noqa
WHITE = 37 # noqa
RESET = 39 # noqa
class AnsiBack:
GREY = 40 # noqa
RED = 41 # noqa
GREEN = 42 # noqa
YELLOW = 43 # noqa
BLUE = 44 # noqa
MAGENTA = 45 # noqa
CYAN = 46 # noqa
WHITE = 47 # noqa
RESET = 49 # noqa
class AnsiStyle:
RESET_ALL = 0 # noqa
BRIGHT = 1 # noqa
RESET_BRIGHT = 22 # noqa
DIM = 2 # noqa
RESET_DIM = RESET_BRIGHT # noqa
ITALICS = 3 # noqa
RESET_ITALICS = 23 # noqa
UNDERLINE = 4 # noqa
RESET_UNDERLINE = 24 # noqa
BLINK_SLOW = 5 # noqa
RESET_BLINK_SLOW = 25 # noqa
BLINK_FAST = 6 # noqa
RESET_BLINK_FAST = 26 # noqa
INVERSE = 7 # noqa
RESET_INVERSE = 27 # noqa
STRIKE_THRU = 9 # noqa
RESET_STRIKE_THRU = 29 # noqa
Fore = AnsiCodes(AnsiFore)
Back = AnsiCodes(AnsiBack)
Style = AnsiCodes(AnsiStyle)
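# Illustrative sketch (added, not part of the original module): the AnsiCodes instances
# above expose both raw escape attributes (e.g. Fore.RED) and lowercase helper functions
# generated by _mkfunc (e.g. Fore.green) that wrap text and reset styles. Nothing is
# emitted unless AnsiCodes.init(True) has enabled ANSI output on a color-capable terminal.
def _ansi_usage_sketch():
    AnsiCodes.init(True)
    plain = Fore.RED + "error" + Fore.RESET
    styled = Fore.green("ok", Style.BRIGHT)
    return plain, styled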
def ERROR_COLOR():
return Fore.RED
def WARNING_COLOR():
return Fore.YELLOW
def HEADER_COLOR():
return Fore.GREEN
class Spinner(object):
"""
A class to display a spinner in the terminal.
It is designed to be used with the `with` statement::
with Spinner("Reticulating splines", "green") as s:
for item in enumerate(items):
s.next()
"""
_default_unicode_chars = u"◓◑◒◐"
_default_ascii_chars = u"-/|\\"
def __init__(self, msg, file=None, step=1,
chars=None, use_unicode=True, print_done=True):
self._msg = msg
self._file = file or sys.stdout
self._step = step
if not chars:
if use_unicode:
chars = self._default_unicode_chars
else:
chars = self._default_ascii_chars
self._chars = chars
self._silent = not self._file.isatty()
self._print_done = print_done
def _iterator(self):
chars = self._chars
index = 0
write = self._file.write
flush = self._file.flush
while True:
write(u'\r')
write(self._msg)
write(u' ')
write(chars[index])
flush()
yield
for i in range(self._step):
yield
index += 1
if index == len(chars):
index = 0
def __enter__(self):
if self._silent:
return self._silent_iterator()
else:
return self._iterator()
def __exit__(self, exc_type, exc_value, traceback):
write = self._file.write
flush = self._file.flush
if not self._silent:
write(u'\r')
write(self._msg)
if self._print_done:
if exc_type is None:
write(Fore.GREEN + u' [Done]\n')
else:
write(Fore.RED + u' [Failed]\n')
else:
write("\n")
flush()
def _silent_iterator(self):
self._file.write(self._msg)
self._file.flush()
while True:
yield
class ProgressBar(object):
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the `with` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, file=None):
"""
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
file : writable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If `file` is not a tty (as determined by
calling its `isatty` member, if any), the scrollbar will
be completely silent.
"""
self._file = file or sys.stdout
if not self._file.isatty():
self.update = self._silent_update
self._silent = True
else:
self._silent = False
try:
self._items = iter(total_or_items)
self._total = len(total_or_items)
except TypeError:
try:
self._total = int(total_or_items)
self._items = iter(range(self._total))
except TypeError:
raise TypeError("First argument must be int or sequence")
self._start_time = time.time()
self._should_handle_resize = (
_CAN_RESIZE_TERMINAL and self._file.isatty())
self._handle_resize()
if self._should_handle_resize:
signal.signal(signal.SIGWINCH, self._handle_resize)
self._signal_set = True
else:
self._signal_set = False
self.update(0)
def _handle_resize(self, signum=None, frame=None):
self._terminal_width = getTtySize(self._file,
self._should_handle_resize)[1]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._silent:
if exc_type is None:
self.update(self._total)
self._file.write('\n')
self._file.flush()
if self._signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def __iter__(self):
return self
def next(self):
try:
rv = next(self._items)
except StopIteration:
self.__exit__(None, None, None)
raise
else:
self.update()
return rv
def update(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if value is None:
value = self._current_value = self._current_value + 1
else:
self._current_value = value
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
suffix = self._formatSuffix(value, frac)
self._bar_length = self._terminal_width - 37
bar_fill = int(float(self._bar_length) * frac)
write(u'\r|')
write(Fore.BLUE + u'=' * bar_fill + Fore.RESET)
if bar_fill < self._bar_length:
write(Fore.GREEN + u'>' + Fore.RESET)
write(u'-' * (self._bar_length - bar_fill - 1))
write(u'|')
write(suffix)
self._file.flush()
def _formatSuffix(self, value, frac):
if value >= self._total:
t = time.time() - self._start_time
time_str = ' '
elif value <= 0:
t = None
time_str = ''
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
time_str = u' ETA '
if t is not None:
time_str += formatTime(t, short=True)
suffix = ' {0:>4s}/{1:>4s}'.format(formatSize(value, short=True),
formatSize(self._total, short=True))
suffix += u' ({0:>6s}%)'.format(u'{0:.2f}'.format(frac * 100.0))
suffix += time_str
return suffix
def _silent_update(self, value=None):
pass
@classmethod
def map(cls, function, items, multiprocess=False, file=None):
"""
Does a `map` operation while displaying a progress bar with
percentage complete.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters:
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, optional
If `True`, use the `multiprocessing` module to distribute each
task to a different processor core.
file : writeable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If `file` is not a tty (as determined by
calling its `isatty` member, if any), the progress bar will
be completely silent.
"""
results = []
if file is None:
file = sys.stdout
with cls(len(items), file=file) as bar:
step_size = max(200, bar._bar_length)
steps = max(int(float(len(items)) / step_size), 1)
if not multiprocess:
for i, item in enumerate(items):
function(item)
if (i % steps) == 0:
bar.update(i)
else:
import multiprocessing
p = multiprocessing.Pool()
for i, result in enumerate(p.imap_unordered(function, items,
steps)):
bar.update(i)
results.append(result)
return results
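# Illustrative sketch (added comment, not part of the original module): a
# typical ProgressBar.map call that fans work out across cores. The `work`
# function and `file_list` here are made-up placeholders.
#
#     >>> def work(path):
#     ...     return len(open(path, 'rb').read())
#     >>> sizes = ProgressBar.map(work, file_list, multiprocess=True)
#
# With multiprocess=True the items go through multiprocessing.Pool's
# imap_unordered, so `work` must be picklable and the returned list is in
# completion order rather than input order.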
def _encode(s):
'''Helper for unicode output. With Python 2 it is necessary to encode to the
LOCAL_ENCODING since by default unicode would otherwise be encoded to ascii.
With Python 3 this conversion is not necessary for the user to perform; in
fact sys.std*.write, for example, requires unicode strings be passed in. This
function will encode for Python 2 and do nothing for Python 3 (except assert
that ``s`` is a unicode type).'''
if compat.PY2:
if isinstance(s, compat.unicode):
try:
return s.encode(LOCAL_ENCODING)
except Exception as ex:
log.error("Encoding error: " + str(ex))
return s.encode(LOCAL_ENCODING, "replace")
elif isinstance(s, str):
return s
else:
raise TypeError("Argument must be str or unicode")
else:
assert(isinstance(s, str))
return s
def printMsg(s):
fp = sys.stdout
s = _encode(s)
try:
fp.write("%s\n" % s)
except UnicodeEncodeError:
fp.write("%s\n" % compat.unicode(s.encode("utf-8", "replace"), "utf-8"))
fp.flush()
def printError(s):
_printWithColor(s, ERROR_COLOR(), sys.stderr)
def printWarning(s):
_printWithColor(s, WARNING_COLOR(), sys.stdout)
def printHeader(s):
_printWithColor(s, HEADER_COLOR(), sys.stdout)
def boldText(s, c=None):
return formatText(s, b=True, c=c)
def formatText(s, b=False, c=None):
return ((Style.BRIGHT if b else '') +
(c or '') +
s +
(Fore.RESET if c else '') +
(Style.RESET_BRIGHT if b else ''))
def _printWithColor(s, color, file):
s = _encode(s)
file.write(color + s + Fore.RESET + '\n')
file.flush()
def cformat(msg, fg, bg=None, styles=None):
'''Format ``msg`` with foreground and optional background. Optional
``styles`` lists will also be applied. The formatted string is returned.'''
fg = fg or ""
bg = bg or ""
styles = "".join(styles or [])
reset = Fore.RESET + Back.RESET + Style.RESET_ALL if (fg or bg or styles) \
else ""
output = u"%(fg)s%(bg)s%(styles)s%(msg)s%(reset)s" % locals()
return output
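# Example (added comment; the message text is arbitrary): cformat only
# assembles escape sequences, so the caller chooses the codes, e.g.
#
#     >>> s = cformat(u"done", Fore.GREEN, styles=[Style.BRIGHT])
#
# which wraps the text in the green/bright codes and appends the combined
# foreground/background/style reset sequence.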
def getTtySize(fd=sys.stdout, check_tty=True):
hw = None
if check_tty:
try:
data = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 4)
hw = struct.unpack("hh", data)
except (OSError, IOError, NameError):
pass
if not hw:
try:
hw = (int(os.environ.get('LINES')),
int(os.environ.get('COLUMNS')))
except (TypeError, ValueError):
hw = (78, 25)
return hw
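# Note (added): getTtySize returns (rows, columns); _handle_resize above keeps
# only index [1], the column count, which is what sizes the progress bar.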
def cprint(msg, fg, bg=None, styles=None, file=sys.stdout):
'''Calls ``cformat`` and prints the result to output stream ``file``.'''
print(cformat(msg, fg, bg=bg, styles=styles), file=file)
if __name__ == "__main__":
AnsiCodes.init(True)
def checkCode(c):
return (c[0] != '_' and
"RESET" not in c and
c[0] == c[0].upper()
)
for bg_name, bg_code in ((c, getattr(Back, c))
for c in dir(Back) if checkCode(c)):
sys.stdout.write('%s%-7s%s %s ' %
(bg_code, bg_name, Back.RESET, bg_code))
for fg_name, fg_code in ((c, getattr(Fore, c))
for c in dir(Fore) if checkCode(c)):
sys.stdout.write(fg_code)
for st_name, st_code in ((c, getattr(Style, c))
for c in dir(Style) if checkCode(c)):
sys.stdout.write('%s%s %s %s' %
(st_code, st_name,
getattr(Style, "RESET_%s" % st_name),
bg_code))
sys.stdout.write("%s\n" % Style.RESET_ALL)
sys.stdout.write("\n")
with Spinner(Fore.GREEN + u"Phase #1") as spinner:
for i in range(50):
time.sleep(.05)
spinner.next()
with Spinner(Fore.RED + u"Phase #2" + Fore.RESET,
print_done=False) as spinner:
for i in range(50):
time.sleep(.05)
spinner.next()
with Spinner(u"Phase #3", print_done=False, use_unicode=False) as spinner:
for i in range(50):
spinner.next()
time.sleep(.05)
with Spinner(u"Phase #4", print_done=False, chars='.oO°Oo.') as spinner:
for i in range(50):
spinner.next()
time.sleep(.05)
items = range(200)
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
time.sleep(.05)
for item in ProgressBar(items):
time.sleep(.05)
progress = 0
max = 320000000
with ProgressBar(max) as bar:
while progress < max:
progress += 23400
bar.update(progress)
time.sleep(.001)
| gpl-3.0 | 7,526,668,568,266,435,000 | 31.031088 | 80 | 0.463496 | false |
NicoSantangelo/sublime-gulp | base_command.py | 1 | 5042 | import sublime
import sublime_plugin
import os.path
is_sublime_text_3 = int(sublime.version()) >= 3000
if is_sublime_text_3:
from .settings import Settings
from .status_bar import StatusBar
from .insert_in_output_view import insert_in_output_view
from .timeout import set_timeout, defer_sync
else:
from settings import Settings
from status_bar import StatusBar
from insert_in_output_view import insert_in_output_view
from timeout import set_timeout, defer_sync
#
# A base for each command
#
class BaseCommand(sublime_plugin.WindowCommand):
def run(self, task_name=None, task_flag=None, silent=False, paths=[]):
self.settings = None
self.setup_data_from_settings()
self.task_name = task_name
self.task_flag = task_flag if task_name is not None and task_flag is not None else self.get_flag_from_task_name()
self.silent = silent
self._working_dir = ""
self.searchable_folders = [os.path.dirname(path) for path in paths] if len(paths) > 0 else self.window.folders()
self.output_view = None
self.status_bar = StatusBar(self.window)
self.work()
def setup_data_from_settings(self):
Settings.gather_shared_data()
self.settings = Settings()
self.results_in_new_tab = self.settings.get("results_in_new_tab", False)
self.check_for_gulpfile = self.settings.get('check_for_gulpfile', True)
def get_flag_from_task_name(self):
flags = self.settings.get("flags", {})
return flags[self.task_name] if self.task_name in flags else ""
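# For instance (illustrative comment; the task name and flag are made up), a
# settings entry like {"flags": {"build": "--production"}} makes every plain
# "build" run pick up "--production" automatically, while a task_flag passed
# explicitly together with a task_name takes precedence, per the check in
# run() above.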
# Properties
@property
def working_dir(self):
return self._working_dir
@working_dir.setter
def working_dir(self, value):
if self.check_for_gulpfile:
self._working_dir = os.path.dirname(value)
else:
self._working_dir = value
# Main method, override
def work(self):
pass
# Panels and message
def show_quick_panel(self, items, on_done=None, font=sublime.MONOSPACE_FONT):
defer_sync(lambda: self.window.show_quick_panel(items, on_done, font))
def show_input_panel(self, caption, initial_text="", on_done=None, on_change=None, on_cancel=None):
self.window.show_input_panel(caption, initial_text, on_done, on_change, on_cancel)
def status_message(self, text):
sublime.status_message("%s: %s" % (Settings.PACKAGE_NAME, text))
def error_message(self, text):
sublime.error_message("%s: %s" % (Settings.PACKAGE_NAME, text))
# Output view
def show_output_panel(self, text):
if self.silent:
self.status_message(text)
return
if self.results_in_new_tab:
new_tab_path = os.path.join(self.gulp_results_path(), "Gulp Results")
self.output_view = self.window.open_file(new_tab_path)
self.output_view.set_scratch(True)
else:
self.output_view = self.window.get_output_panel("gulp_output")
self.show_panel()
self.output_view.settings().set("scroll_past_end", False)
self.add_syntax()
self.append_to_output_view(text)
def gulp_results_path(self):
return next(folder_path for folder_path in self.searchable_folders if self.working_dir.find(folder_path) != -1) if self.working_dir else ""
def gulp_results_view(self):
if self.output_view is None:
gulp_results = [view for view in sublime.active_window().views() if view.file_name() and os.path.basename(view.file_name()) == "Gulp Results"]
return gulp_results[0] if len(gulp_results) > 0 else None
else:
return self.output_view
def add_syntax(self):
if self.settings.has("syntax_override"):
syntax_file = self.settings.get("syntax_override")
else:
syntax_file = self.settings.get_from_user_settings("syntax", "Packages/Gulp/syntax/GulpResults.tmLanguage")
if syntax_file:
self.output_view.set_syntax_file(syntax_file)
def append_to_output_view_in_main_thread(self, text):
defer_sync(lambda: self.append_to_output_view(text))
def append_to_output_view(self, text):
if not self.silent:
insert_in_output_view(self.output_view, text, self.results_in_new_tab)
def set_output_close_on_timeout(self):
timeout = self.settings.get("results_autoclose_timeout_in_milliseconds", False)
if timeout:
set_timeout(self.close_panel, timeout)
def close_panel(self):
if self.results_in_new_tab:
self.output_view = self.gulp_results_view()
if self.output_view and self.output_view.file_name():
self.window.focus_view(self.output_view)
self.window.run_command('close_file')
else:
self.window.run_command("hide_panel", { "panel": "output.gulp_output" })
def show_panel(self):
self.window.run_command("show_panel", { "panel": "output.gulp_output" })
| mit | 4,268,979,282,055,752,000 | 36.626866 | 154 | 0.639825 | false |
loonycyborg/scons-plusplus | python_modules/Tool/lex.py | 1 | 4870 | """SCons.Tool.lex
Tool-specific initialization for lex.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/lex.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import os.path
import sys
import SCons.Action
import SCons.Tool
import SCons.Util
from SCons.Platform.mingw import MINGW_DEFAULT_PATHS
from SCons.Platform.cygwin import CYGWIN_DEFAULT_PATHS
from SCons.Platform.win32 import CHOCO_DEFAULT_PATH
LexAction = SCons.Action.Action("$LEXCOM", "$LEXCOMSTR")
if sys.platform == 'win32':
BINS = ['flex', 'lex', 'win_flex']
else:
BINS = ["flex", "lex"]
def lexEmitter(target, source, env):
sourceBase, sourceExt = os.path.splitext(SCons.Util.to_String(source[0]))
if sourceExt == ".lm": # If using Objective-C
target = [sourceBase + ".m"] # the extension is ".m".
# This emitter essentially tries to add to the target all extra
# files generated by flex.
# Different options that are used to trigger the creation of extra files.
fileGenOptions = ["--header-file=", "--tables-file="]
lexflags = env.subst("$LEXFLAGS", target=target, source=source)
for option in SCons.Util.CLVar(lexflags):
for fileGenOption in fileGenOptions:
l = len(fileGenOption)
if option[:l] == fileGenOption:
# A file generating option is present, so add the
# file name to the target list.
fileName = option[l:].strip()
target.append(fileName)
return (target, source)
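# Worked example (comment added for clarity, not from upstream SCons): with
#     env.Append(LEXFLAGS=['--header-file=lexer.h'])
# the emitter above spots the file-generating option and appends 'lexer.h' to
# the target list, so the generated header is tracked as a build product
# alongside the .c file produced from the .l source.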
def get_lex_path(env, append_paths=False):
"""
Find the path to the lex tool, searching several possible names
Only called in the Windows case, so the default_path
can be Windows-specific
:param env: current construction environment
:param append_paths: if set, add the path to the tool to PATH
:return: path to lex tool, if found
"""
for prog in BINS:
bin_path = SCons.Tool.find_program_path(
env,
prog,
default_paths=CHOCO_DEFAULT_PATH + MINGW_DEFAULT_PATHS + CYGWIN_DEFAULT_PATHS )
if bin_path:
if append_paths:
env.AppendENVPath('PATH', os.path.dirname(bin_path))
return bin_path
SCons.Warnings.Warning('lex tool requested, but lex or flex binary not found in ENV PATH')
def generate(env):
"""Add Builders and construction variables for lex to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action(".l", LexAction)
c_file.add_emitter(".l", lexEmitter)
c_file.add_action(".lex", LexAction)
c_file.add_emitter(".lex", lexEmitter)
# Objective-C
cxx_file.add_action(".lm", LexAction)
cxx_file.add_emitter(".lm", lexEmitter)
# C++
cxx_file.add_action(".ll", LexAction)
cxx_file.add_emitter(".ll", lexEmitter)
env["LEXFLAGS"] = SCons.Util.CLVar("")
if sys.platform == 'win32':
# ignore the return - we do not need the full path here
_ = get_lex_path(env, append_paths=True)
env["LEX"] = env.Detect(BINS)
if not env.get("LEXUNISTD"):
env["LEXUNISTD"] = SCons.Util.CLVar("")
env["LEXCOM"] = "$LEX $LEXUNISTD $LEXFLAGS -t $SOURCES > $TARGET"
else:
env["LEX"] = env.Detect(BINS)
env["LEXCOM"] = "$LEX $LEXFLAGS -t $SOURCES > $TARGET"
def exists(env):
if sys.platform == 'win32':
return get_lex_path(env)
else:
return env.Detect(BINS)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | 1,416,306,155,329,501,000 | 33.539007 | 114 | 0.674127 | false |
attm2x/m2x-python | m2x/tests/test_utils.py | 1 | 3024 | from datetime import datetime
from iso8601 import iso8601
from m2x import utils
class TestUtils(object):
def test_to_utc(self):
dtime = datetime.now()
utc_dtime = utils.to_utc(dtime)
assert utc_dtime.tzinfo == iso8601.UTC
def test_to_iso(self):
dtime = iso8601.parse_date('2015-04-15 12:00:00+0300')
iso_time = utils.to_iso(dtime)
assert iso_time == '2015-04-15T15:00:00.000000Z'
dtime = '2015-04-15 12:00:00'
iso_time = utils.to_iso(dtime)
assert iso_time == '2015-04-15T15:00:00.000000Z'
def test_tags_to_server(self):
tags = utils.tags_to_server(['foo', 'bar'])
assert tags == 'foo,bar'
tags = utils.tags_to_server(['foo'])
assert tags == 'foo'
tags = utils.tags_to_server('foo')
assert tags == 'foo'
tags = utils.tags_to_server(None)
assert tags == ''
tags = utils.tags_to_server([None])
assert tags == ''
tags = utils.tags_to_server([''])
assert tags == ''
def test_from_server(self):
out = utils.from_server('tags', 'foo,bar')
assert out == ['foo', 'bar']
out = utils.from_server('timestamp', '2015-04-15T15:00:00.000000Z')
assert out.year == 2015 and out.month == 4 and out.day == 15
assert out.hour == 15 and out.minute == 0 and out.second == 0
assert out.tzinfo == iso8601.UTC
out = utils.from_server('ignored', 'just a string')
assert out == 'just a string'
out = utils.from_server('ignored', 123)
assert out == 123
def test_to_server(self):
out = utils.to_server('tags', ['foo', 'bar'])
assert out == 'foo,bar'
dtime = iso8601.parse_date('2015-04-15 12:00:00+0300')
out = utils.to_server('timestamp', dtime)
assert out == '2015-04-15T15:00:00.000000Z'
out = utils.to_server('ignored', 'just a string')
assert out == 'just a string'
out = utils.to_server('ignored', 123)
assert out == 123
def test_attrs_from_server(self):
values = {'tags': 'foo,bar',
'timestamp': '2015-04-15 12:00:00+0300',
'ignored1': 'just a string',
'ignored2': 123}
out = utils.attrs_from_server(values)
assert out['tags'] == ['foo', 'bar']
assert out['timestamp'] == iso8601.parse_date(
'2015-04-15 12:00:00+0300'
)
assert out['ignored1'] == 'just a string'
assert out['ignored2'] == 123
def test_attrs_to_server(self):
values = {'tags': ['foo', 'bar'],
'timestamp': iso8601.parse_date('2015-04-15 12:00:00+0300'),
'ignored1': 'just a string',
'ignored2': 123}
out = utils.attrs_to_server(values)
assert out['tags'] == 'foo,bar'
assert out['timestamp'] == '2015-04-15T15:00:00.000000Z'
assert out['ignored1'] == 'just a string'
assert out['ignored2'] == 123
| mit | -6,358,644,146,657,331,000 | 36.333333 | 78 | 0.549934 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_scale_set_vm.py | 1 | 7939 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineScaleSetVM(Resource):
"""Describes a virtual machine scale set virtual machine.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:ivar instance_id: The virtual machine instance ID.
:vartype instance_id: str
:ivar sku: The virtual machine SKU.
:vartype sku: ~azure.mgmt.compute.v2017_12_01.models.Sku
:ivar latest_model_applied: Specifies whether the latest model has been
applied to the virtual machine.
:vartype latest_model_applied: bool
:ivar vm_id: Azure VM unique ID.
:vartype vm_id: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view:
~azure.mgmt.compute.v2017_12_01.models.VirtualMachineInstanceView
:param hardware_profile: Specifies the hardware settings for the virtual
machine.
:type hardware_profile:
~azure.mgmt.compute.v2017_12_01.models.HardwareProfile
:param storage_profile: Specifies the storage settings for the virtual
machine disks.
:type storage_profile:
~azure.mgmt.compute.v2017_12_01.models.StorageProfile
:param os_profile: Specifies the operating system settings for the virtual
machine.
:type os_profile: ~azure.mgmt.compute.v2017_12_01.models.OSProfile
:param network_profile: Specifies the network interfaces of the virtual
machine.
:type network_profile:
~azure.mgmt.compute.v2017_12_01.models.NetworkProfile
:param diagnostics_profile: Specifies the boot diagnostic settings state.
<br><br>Minimum api-version: 2015-06-15.
:type diagnostics_profile:
~azure.mgmt.compute.v2017_12_01.models.DiagnosticsProfile
:param availability_set: Specifies information about the availability set
that the virtual machine should be assigned to. Virtual machines specified
in the same availability set are allocated to different nodes to maximize
availability. For more information about availability sets, see [Manage
the availability of virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
<br><br> For more information on Azure planned maintenance, see [Planned
maintenance for virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Currently, a VM can only be added to availability set at creation
time. An existing VM cannot be added to an availability set.
:type availability_set: ~azure.mgmt.compute.v2017_12_01.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param license_type: Specifies that the image or disk that is being used
was licensed on-premises. This element is only used for images that
contain the Windows Server operating system. <br><br> Possible values are:
<br><br> Windows_Client <br><br> Windows_Server <br><br> If this element
is included in a request for an update, the value must match the initial
value. This value cannot be updated. <br><br> For more information, see
[Azure Hybrid Use Benefit for Windows
Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Minimum api-version: 2015-06-15
:type license_type: str
:param plan: Specifies information about the marketplace image used to
create the virtual machine. This element is only used for marketplace
images. Before you can use a marketplace image from an API, you must
enable the image for programmatic use. In the Azure portal, find the
marketplace image that you want to use and then click **Want to deploy
programmatically, Get Started ->**. Enter any required information and
then click **Save**.
:type plan: ~azure.mgmt.compute.v2017_12_01.models.Plan
:ivar resources: The virtual machine child extension resources.
:vartype resources:
list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineExtension]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'instance_id': {'readonly': True},
'sku': {'readonly': True},
'latest_model_applied': {'readonly': True},
'vm_id': {'readonly': True},
'instance_view': {'readonly': True},
'provisioning_state': {'readonly': True},
'resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'latest_model_applied': {'key': 'properties.latestModelApplied', 'type': 'bool'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'plan': {'key': 'plan', 'type': 'Plan'},
'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetVM, self).__init__(**kwargs)
self.instance_id = None
self.sku = None
self.latest_model_applied = None
self.vm_id = None
self.instance_view = None
self.hardware_profile = kwargs.get('hardware_profile', None)
self.storage_profile = kwargs.get('storage_profile', None)
self.os_profile = kwargs.get('os_profile', None)
self.network_profile = kwargs.get('network_profile', None)
self.diagnostics_profile = kwargs.get('diagnostics_profile', None)
self.availability_set = kwargs.get('availability_set', None)
self.provisioning_state = None
self.license_type = kwargs.get('license_type', None)
self.plan = kwargs.get('plan', None)
self.resources = None
| mit | 385,435,582,843,014,660 | 50.219355 | 170 | 0.666835 | false |
ryansb/tremendous | tremendous/colors.py | 1 | 12059 | from functools import partial
from tremendous.api import (
apply_color,
apply_256,
apply_256_bg,
apply_256_hl,
)
from tremendous.bindings import lib as __lib
from tremendous.bindings import ffi
colors_16 = dict(
bold=__lib.BOLD,
italic=__lib.ITALIC,
under=__lib.UNDER,
under2=__lib.UNDER2,
strike=__lib.STRIKE,
blink=__lib.BLINK,
flip=__lib.FLIP,
black=__lib.BLACK,
red=__lib.RED,
green=__lib.GREEN,
yellow=__lib.YELLOW,
blue=__lib.BLUE,
magenta=__lib.MAGENTA,
cyan=__lib.CYAN,
white=__lib.WHITE,
hblack=__lib.HBLACK,
hred=__lib.HRED,
hgreen=__lib.HGREEN,
hyellow=__lib.HYELLOW,
hblue=__lib.HBLUE,
hmagenta=__lib.HMAGENTA,
hcyan=__lib.HCYAN,
hwhite=__lib.HWHITE,
bgblack=__lib.BGBLACK,
bgred=__lib.BGRED,
bggreen=__lib.BGGREEN,
bgyellow=__lib.BGYELLOW,
bgblue=__lib.BGBLUE,
bgmagenta=__lib.BGMAGENTA,
bgcyan=__lib.BGCYAN,
bgwhite=__lib.BGWHITE,
)
__funcs = {}
# This is also gross. Sorry.
for k, v in colors_16.items():
if k.startswith('h'):
__funcs['highlight_' + k[1:]] = partial(apply_color, v)
__funcs['hi_' + k[1:]] = partial(apply_color, v)
__funcs['hl_' + k[1:]] = partial(apply_color, v)
elif k.startswith('bg'):
__funcs['background_' + k[1:]] = partial(apply_color, v)
__funcs['bg_' + k[2:]] = partial(apply_color, v)
elif k.startswith('under'):
__funcs[k] = partial(apply_color, v)
__funcs['underline' + k[5:]] = partial(apply_color, v)
else:
__funcs[k] = partial(apply_color, v)
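# Net effect (explanatory comment, added): every 16-color name gains plain,
# highlight and background variants, e.g. 'red', 'highlight_red'/'hi_red'/
# 'hl_red' and 'background_red'/'bg_red', plus 'under'/'underline' aliases for
# the underline styles. The loop below repeats the idea for the 256-color
# palette.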
extended_colors = {
'Grey0': [0, 0, 0],
'NavyBlue': [0, 0, 95],
'DarkBlue': [0, 0, 135],
'Blue3': [0, 0, 175],
'Blue3': [0, 0, 215],
'Blue1': [0, 0, 255],
'DarkGreen': [0, 95, 0],
'DeepSkyBlue4': [0, 95, 95],
'DeepSkyBlue4': [0, 95, 135],
'DeepSkyBlue4': [0, 95, 175],
'DodgerBlue3': [0, 95, 215],
'DodgerBlue2': [0, 95, 255],
'Green4': [0, 135, 0],
'SpringGreen4': [0, 135, 95],
'Turquoise4': [0, 135, 135],
'DeepSkyBlue3': [0, 135, 175],
'DeepSkyBlue3': [0, 135, 215],
'DodgerBlue1': [0, 135, 255],
'Green3': [0, 175, 0],
'SpringGreen3': [0, 175, 95],
'DarkCyan': [0, 175, 135],
'LightSeaGreen': [0, 175, 175],
'DeepSkyBlue2': [0, 175, 215],
'DeepSkyBlue1': [0, 175, 255],
'Green3': [0, 215, 0],
'SpringGreen3': [0, 215, 95],
'SpringGreen2': [0, 215, 135],
'Cyan3': [0, 215, 175],
'DarkTurquoise': [0, 215, 215],
'Turquoise2': [0, 215, 255],
'Green1': [0, 255, 0],
'SpringGreen2': [0, 255, 95],
'SpringGreen1': [0, 255, 135],
'MediumSpringGreen': [0, 255, 175],
'Cyan2': [0, 255, 215],
'Cyan1': [0, 255, 255],
'DarkRed': [95, 0, 0],
'DeepPink4': [95, 0, 95],
'Purple4': [95, 0, 135],
'Purple4': [95, 0, 175],
'Purple3': [95, 0, 215],
'BlueViolet': [95, 0, 255],
'Orange4': [95, 95, 0],
'Grey37': [95, 95, 95],
'MediumPurple4': [95, 95, 135],
'SlateBlue3': [95, 95, 175],
'SlateBlue3': [95, 95, 215],
'RoyalBlue1': [95, 95, 255],
'Chartreuse4': [95, 135, 0],
'DarkSeaGreen4': [95, 135, 95],
'PaleTurquoise4': [95, 135, 135],
'SteelBlue': [95, 135, 175],
'SteelBlue3': [95, 135, 215],
'CornflowerBlue': [95, 135, 255],
'Chartreuse3': [95, 175, 0],
'DarkSeaGreen4': [95, 175, 95],
'CadetBlue': [95, 175, 135],
'CadetBlue': [95, 175, 175],
'SkyBlue3': [95, 175, 215],
'SteelBlue1': [95, 175, 255],
'Chartreuse3': [95, 215, 0],
'PaleGreen3': [95, 215, 95],
'SeaGreen3': [95, 215, 135],
'Aquamarine3': [95, 215, 175],
'MediumTurquoise': [95, 215, 215],
'SteelBlue1': [95, 215, 255],
'Chartreuse2': [95, 255, 0],
'SeaGreen2': [95, 255, 95],
'SeaGreen1': [95, 255, 135],
'SeaGreen1': [95, 255, 175],
'Aquamarine1': [95, 255, 215],
'DarkSlateGray2': [95, 255, 255],
'DarkRed': [135, 0, 0],
'DeepPink4': [135, 0, 95],
'DarkMagenta': [135, 0, 135],
'DarkMagenta': [135, 0, 175],
'DarkViolet': [135, 0, 215],
'Purple': [135, 0, 255],
'Orange4': [135, 95, 0],
'LightPink4': [135, 95, 95],
'Plum4': [135, 95, 135],
'MediumPurple3': [135, 95, 175],
'MediumPurple3': [135, 95, 215],
'SlateBlue1': [135, 95, 255],
'Yellow4': [135, 135, 0],
'Wheat4': [135, 135, 95],
'Grey53': [135, 135, 135],
'LightSlateGrey': [135, 135, 175],
'MediumPurple': [135, 135, 215],
'LightSlateBlue': [135, 135, 255],
'Yellow4': [135, 175, 0],
'DarkOliveGreen3': [135, 175, 95],
'DarkSeaGreen': [135, 175, 135],
'LightSkyBlue3': [135, 175, 175],
'LightSkyBlue3': [135, 175, 215],
'SkyBlue2': [135, 175, 255],
'Chartreuse2': [135, 215, 0],
'DarkOliveGreen3': [135, 215, 95],
'PaleGreen3': [135, 215, 135],
'DarkSeaGreen3': [135, 215, 175],
'DarkSlateGray3': [135, 215, 215],
'SkyBlue1': [135, 215, 255],
'Chartreuse1': [135, 255, 0],
'LightGreen': [135, 255, 95],
'LightGreen': [135, 255, 135],
'PaleGreen1': [135, 255, 175],
'Aquamarine1': [135, 255, 215],
'DarkSlateGray1': [135, 255, 255],
'Red3': [175, 0, 0],
'DeepPink4': [175, 0, 95],
'MediumVioletRed': [175, 0, 135],
'Magenta3': [175, 0, 175],
'DarkViolet': [175, 0, 215],
'Purple': [175, 0, 255],
'DarkOrange3': [175, 95, 0],
'IndianRed': [175, 95, 95],
'HotPink3': [175, 95, 135],
'MediumOrchid3': [175, 95, 175],
'MediumOrchid': [175, 95, 215],
'MediumPurple2': [175, 95, 255],
'DarkGoldenrod': [175, 135, 0],
'LightSalmon3': [175, 135, 95],
'RosyBrown': [175, 135, 135],
'Grey63': [175, 135, 175],
'MediumPurple2': [175, 135, 215],
'MediumPurple1': [175, 135, 255],
'Gold3': [175, 175, 0],
'DarkKhaki': [175, 175, 95],
'NavajoWhite3': [175, 175, 135],
'Grey69': [175, 175, 175],
'LightSteelBlue3': [175, 175, 215],
'LightSteelBlue': [175, 175, 255],
'Yellow3': [175, 215, 0],
'DarkOliveGreen3': [175, 215, 95],
'DarkSeaGreen3': [175, 215, 135],
'DarkSeaGreen2': [175, 215, 175],
'LightCyan3': [175, 215, 215],
'LightSkyBlue1': [175, 215, 255],
'GreenYellow': [175, 255, 0],
'DarkOliveGreen2': [175, 255, 95],
'PaleGreen1': [175, 255, 135],
'DarkSeaGreen2': [175, 255, 175],
'DarkSeaGreen1': [175, 255, 215],
'PaleTurquoise1': [175, 255, 255],
'Red3': [215, 0, 0],
'DeepPink3': [215, 0, 95],
'DeepPink3': [215, 0, 135],
'Magenta3': [215, 0, 175],
'Magenta3': [215, 0, 215],
'Magenta2': [215, 0, 255],
'DarkOrange3': [215, 95, 0],
'IndianRed': [215, 95, 95],
'HotPink3': [215, 95, 135],
'HotPink2': [215, 95, 175],
'Orchid': [215, 95, 215],
'MediumOrchid1': [215, 95, 255],
'Orange3': [215, 135, 0],
'LightSalmon3': [215, 135, 95],
'LightPink3': [215, 135, 135],
'Pink3': [215, 135, 175],
'Plum3': [215, 135, 215],
'Violet': [215, 135, 255],
'Gold3': [215, 175, 0],
'LightGoldenrod3': [215, 175, 95],
'Tan': [215, 175, 135],
'MistyRose3': [215, 175, 175],
'Thistle3': [215, 175, 215],
'Plum2': [215, 175, 255],
'Yellow3': [215, 215, 0],
'Khaki3': [215, 215, 95],
'LightGoldenrod2': [215, 215, 135],
'LightYellow3': [215, 215, 175],
'Grey84': [215, 215, 215],
'LightSteelBlue1': [215, 215, 255],
'Yellow2': [215, 255, 0],
'DarkOliveGreen1': [215, 255, 95],
'DarkOliveGreen1': [215, 255, 135],
'DarkSeaGreen1': [215, 255, 175],
'Honeydew2': [215, 255, 215],
'LightCyan1': [215, 255, 255],
'Red1': [255, 0, 0],
'DeepPink2': [255, 0, 95],
'DeepPink1': [255, 0, 135],
'DeepPink1': [255, 0, 175],
'Magenta2': [255, 0, 215],
'Magenta1': [255, 0, 255],
'OrangeRed1': [255, 95, 0],
'IndianRed1': [255, 95, 95],
'IndianRed1': [255, 95, 135],
'HotPink': [255, 95, 175],
'HotPink': [255, 95, 215],
'MediumOrchid1': [255, 95, 255],
'DarkOrange': [255, 135, 0],
'Salmon1': [255, 135, 95],
'LightCoral': [255, 135, 135],
'PaleVioletRed1': [255, 135, 175],
'Orchid2': [255, 135, 215],
'Orchid1': [255, 135, 255],
'Orange1': [255, 175, 0],
'SandyBrown': [255, 175, 95],
'LightSalmon1': [255, 175, 135],
'LightPink1': [255, 175, 175],
'Pink1': [255, 175, 215],
'Plum1': [255, 175, 255],
'Gold1': [255, 215, 0],
'LightGoldenrod2': [255, 215, 95],
'LightGoldenrod2': [255, 215, 135],
'NavajoWhite1': [255, 215, 175],
'MistyRose1': [255, 215, 215],
'Thistle1': [255, 215, 255],
'Yellow1': [255, 255, 0],
'LightGoldenrod1': [255, 255, 95],
'Khaki1': [255, 255, 135],
'Wheat1': [255, 255, 175],
'Cornsilk1': [255, 255, 215],
'Grey100': [255, 255, 255],
'Grey3': [8, 8, 8],
'Grey7': [18, 18, 18],
'Grey11': [28, 28, 28],
'Grey15': [38, 38, 38],
'Grey19': [48, 48, 48],
'Grey23': [58, 58, 58],
'Grey27': [68, 68, 68],
'Grey30': [78, 78, 78],
'Grey35': [88, 88, 88],
'Grey39': [98, 98, 98],
'Grey42': [108, 108, 108],
'Grey46': [118, 118, 118],
'Grey50': [128, 128, 128],
'Grey54': [138, 138, 138],
'Grey58': [148, 148, 148],
'Grey62': [158, 158, 158],
'Grey66': [168, 168, 168],
'Grey70': [178, 178, 178],
'Grey74': [188, 188, 188],
'Grey78': [198, 198, 198],
'Grey82': [208, 208, 208],
'Grey85': [218, 218, 218],
'Grey89': [228, 228, 228],
'Grey93': [238, 238, 238],
}
__extended_funcs = {}
# This is also gross. Sorry.
for k, v in extended_colors.items():
color = ffi.new('rgb_t *', v)
__extended_funcs[k.lower()] = partial(apply_256, v)
__extended_funcs['bg_' + k.lower()] = partial(apply_256_bg, v)
__extended_funcs['background_' + k.lower()] = partial(apply_256_bg, v)
__extended_funcs['hl_' + k.lower()] = partial(apply_256_hl, v)
__extended_funcs['highlight_' + k.lower()] = partial(apply_256_hl, v)
| mit | 2,573,298,628,492,698,000 | 37.404459 | 74 | 0.43992 | false |
kuba/letsencrypt | acme/acme/messages.py | 1 | 13660 | """ACME protocol messages."""
import collections
from acme import challenges
from acme import errors
from acme import fields
from acme import jose
from acme import util
class Error(jose.JSONObjectWithFields, errors.Error):
"""ACME error.
https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
:ivar unicode typ:
:ivar unicode title:
:ivar unicode detail:
"""
ERROR_TYPE_DESCRIPTIONS = dict(
('urn:acme:error:' + name, description) for name, description in (
('badCSR', 'The CSR is unacceptable (e.g., due to a short key)'),
('badNonce', 'The client sent an unacceptable anti-replay nonce'),
('connection', 'The server could not connect to the client to '
'verify the domain'),
('dnssec', 'The server could not validate a DNSSEC signed domain'),
('invalidEmail',
'The provided email for a registration was invalid'),
('malformed', 'The request message was malformed'),
('rateLimited', 'There were too many requests of a given type'),
('serverInternal', 'The server experienced an internal error'),
('tls', 'The server experienced a TLS error during domain '
'verification'),
('unauthorized', 'The client lacks sufficient authorization'),
('unknownHost', 'The server could not resolve a domain name'),
)
)
typ = jose.Field('type')
title = jose.Field('title', omitempty=True)
detail = jose.Field('detail')
@property
def description(self):
"""Hardcoded error description based on its type.
:returns: Description if standard ACME error or ``None``.
:rtype: unicode
"""
return self.ERROR_TYPE_DESCRIPTIONS.get(self.typ)
def __str__(self):
return ' :: '.join(
part for part in
(self.typ, self.description, self.detail, self.title)
if part is not None)
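# Rough illustration (comment added here, not part of upstream acme): an error
# document such as {"type": "urn:acme:error:malformed", "detail": "..."} is
# deserialized with Error.from_json(), and str(error) then joins type,
# hardcoded description and detail, e.g.
# 'urn:acme:error:malformed :: The request message was malformed :: ...'.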
class _Constant(jose.JSONDeSerializable, collections.Hashable):
"""ACME constant."""
__slots__ = ('name',)
POSSIBLE_NAMES = NotImplemented
def __init__(self, name):
self.POSSIBLE_NAMES[name] = self
self.name = name
def to_partial_json(self):
return self.name
@classmethod
def from_json(cls, value):
if value not in cls.POSSIBLE_NAMES:
raise jose.DeserializationError(
'{0} not recognized'.format(cls.__name__))
return cls.POSSIBLE_NAMES[value]
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self.name)
def __eq__(self, other):
return isinstance(other, type(self)) and other.name == self.name
def __hash__(self):
return hash((self.__class__, self.name))
def __ne__(self, other):
return not self == other
class Status(_Constant):
"""ACME "status" field."""
POSSIBLE_NAMES = {}
STATUS_UNKNOWN = Status('unknown')
STATUS_PENDING = Status('pending')
STATUS_PROCESSING = Status('processing')
STATUS_VALID = Status('valid')
STATUS_INVALID = Status('invalid')
STATUS_REVOKED = Status('revoked')
class IdentifierType(_Constant):
"""ACME identifier type."""
POSSIBLE_NAMES = {}
IDENTIFIER_FQDN = IdentifierType('dns') # IdentifierDNS in Boulder
class Identifier(jose.JSONObjectWithFields):
"""ACME identifier.
:ivar IdentifierType typ:
:ivar unicode value:
"""
typ = jose.Field('type', decoder=IdentifierType.from_json)
value = jose.Field('value')
class Directory(jose.JSONDeSerializable):
"""Directory."""
_REGISTERED_TYPES = {}
@classmethod
def _canon_key(cls, key):
return getattr(key, 'resource_type', key)
@classmethod
def register(cls, resource_body_cls):
"""Register resource."""
assert resource_body_cls.resource_type not in cls._REGISTERED_TYPES
cls._REGISTERED_TYPES[resource_body_cls.resource_type] = resource_body_cls
return resource_body_cls
def __init__(self, jobj):
canon_jobj = util.map_keys(jobj, self._canon_key)
if not set(canon_jobj).issubset(self._REGISTERED_TYPES):
# TODO: acme-spec is not clear about this: 'It is a JSON
# dictionary, whose keys are the "resource" values listed
# in {{https-requests}}'
raise ValueError('Wrong directory fields')
# TODO: check that everything is an absolute URL; acme-spec is
# not clear on that
self._jobj = canon_jobj
def __getattr__(self, name):
try:
return self[name.replace('_', '-')]
except KeyError as error:
raise AttributeError(str(error))
def __getitem__(self, name):
try:
return self._jobj[self._canon_key(name)]
except KeyError:
raise KeyError('Directory field not found')
def to_partial_json(self):
return self._jobj
@classmethod
def from_json(cls, jobj):
try:
return cls(jobj)
except ValueError as error:
raise jose.DeserializationError(str(error))
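# Usage sketch (added comment; the URL is illustrative): the directory JSON
# maps resource names to URLs, and __getattr__ above translates underscores to
# dashes, so
#
#     directory = Directory({'new-reg': 'https://ca.example/acme/new-reg'})
#     directory.new_reg        # -> 'https://ca.example/acme/new-reg'
#     directory['new-reg']     # same lookup by key
#
# Keys that are not registered resource types raise ValueError at construction.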
class Resource(jose.JSONObjectWithFields):
"""ACME Resource.
:ivar acme.messages.ResourceBody body: Resource body.
"""
body = jose.Field('body')
class ResourceWithURI(Resource):
"""ACME Resource with URI.
:ivar unicode uri: Location of the resource.
"""
uri = jose.Field('uri') # no ChallengeResource.uri
class ResourceBody(jose.JSONObjectWithFields):
"""ACME Resource Body."""
class Registration(ResourceBody):
"""Registration Resource Body.
:ivar acme.jose.jwk.JWK key: Public key.
:ivar tuple contact: Contact information following ACME spec,
`tuple` of `unicode`.
:ivar unicode agreement:
:ivar unicode authorizations: URI where
`messages.Registration.Authorizations` can be found.
:ivar unicode certificates: URI where
`messages.Registration.Certificates` can be found.
"""
# on new-reg key server ignores 'key' and populates it based on
# JWS.signature.combined.jwk
key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)
contact = jose.Field('contact', omitempty=True, default=())
agreement = jose.Field('agreement', omitempty=True)
authorizations = jose.Field('authorizations', omitempty=True)
certificates = jose.Field('certificates', omitempty=True)
class Authorizations(jose.JSONObjectWithFields):
"""Authorizations granted to Account in the process of registration.
:ivar tuple authorizations: URIs to Authorization Resources.
"""
authorizations = jose.Field('authorizations')
class Certificates(jose.JSONObjectWithFields):
"""Certificates granted to Account in the process of registration.
:ivar tuple certificates: URIs to Certificate Resources.
"""
certificates = jose.Field('certificates')
phone_prefix = 'tel:'
email_prefix = 'mailto:'
@classmethod
def from_data(cls, phone=None, email=None, **kwargs):
"""Create registration resource from contact details."""
details = list(kwargs.pop('contact', ()))
if phone is not None:
details.append(cls.phone_prefix + phone)
if email is not None:
details.append(cls.email_prefix + email)
kwargs['contact'] = tuple(details)
return cls(**kwargs)
def _filter_contact(self, prefix):
return tuple(
detail[len(prefix):] for detail in self.contact
if detail.startswith(prefix))
@property
def phones(self):
"""All phones found in the ``contact`` field."""
return self._filter_contact(self.phone_prefix)
@property
def emails(self):
"""All emails found in the ``contact`` field."""
return self._filter_contact(self.email_prefix)
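# Example (added comment; the address is a placeholder):
#
#     >>> reg = Registration.from_data(email='admin@example.com')
#     >>> reg.contact
#     ('mailto:admin@example.com',)
#     >>> reg.emails
#     ('admin@example.com',)
#
# Phone numbers are handled the same way via the 'tel:' prefix.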
@Directory.register
class NewRegistration(Registration):
"""New registration."""
resource_type = 'new-reg'
resource = fields.Resource(resource_type)
class UpdateRegistration(Registration):
"""Update registration."""
resource_type = 'reg'
resource = fields.Resource(resource_type)
class RegistrationResource(ResourceWithURI):
"""Registration Resource.
:ivar acme.messages.Registration body:
:ivar unicode new_authzr_uri: URI found in the 'next' ``Link`` header
:ivar unicode terms_of_service: URL for the CA TOS.
"""
body = jose.Field('body', decoder=Registration.from_json)
new_authzr_uri = jose.Field('new_authzr_uri')
terms_of_service = jose.Field('terms_of_service', omitempty=True)
class ChallengeBody(ResourceBody):
"""Challenge Resource Body.
.. todo::
Confusingly, this has a similar name to `.challenges.Challenge`,
as well as `.achallenges.AnnotatedChallenge`. Please use names
such as ``challb`` to distinguish instances of this class from
``achall``.
:ivar acme.challenges.Challenge: Wrapped challenge.
Conveniently, all challenge fields are proxied, i.e. you can
call ``challb.x`` to get ``challb.chall.x`` contents.
:ivar acme.messages.Status status:
:ivar datetime.datetime validated:
:ivar messages.Error error:
"""
__slots__ = ('chall',)
uri = jose.Field('uri')
status = jose.Field('status', decoder=Status.from_json,
omitempty=True, default=STATUS_PENDING)
validated = fields.RFC3339Field('validated', omitempty=True)
error = jose.Field('error', decoder=Error.from_json,
omitempty=True, default=None)
def to_partial_json(self):
jobj = super(ChallengeBody, self).to_partial_json()
jobj.update(self.chall.to_partial_json())
return jobj
@classmethod
def fields_from_json(cls, jobj):
jobj_fields = super(ChallengeBody, cls).fields_from_json(jobj)
jobj_fields['chall'] = challenges.Challenge.from_json(jobj)
return jobj_fields
def __getattr__(self, name):
return getattr(self.chall, name)
class ChallengeResource(Resource):
"""Challenge Resource.
:ivar acme.messages.ChallengeBody body:
:ivar unicode authzr_uri: URI found in the 'up' ``Link`` header.
"""
body = jose.Field('body', decoder=ChallengeBody.from_json)
authzr_uri = jose.Field('authzr_uri')
@property
def uri(self): # pylint: disable=missing-docstring,no-self-argument
# bug? 'method already defined line None'
# pylint: disable=function-redefined
return self.body.uri # pylint: disable=no-member
class Authorization(ResourceBody):
"""Authorization Resource Body.
:ivar acme.messages.Identifier identifier:
:ivar list challenges: `list` of `.ChallengeBody`
:ivar tuple combinations: Challenge combinations (`tuple` of `tuple`
of `int`, as opposed to `list` of `list` from the spec).
:ivar acme.messages.Status status:
:ivar datetime.datetime expires:
"""
identifier = jose.Field('identifier', decoder=Identifier.from_json)
challenges = jose.Field('challenges', omitempty=True)
combinations = jose.Field('combinations', omitempty=True)
status = jose.Field('status', omitempty=True, decoder=Status.from_json)
# TODO: 'expires' is allowed for Authorization Resources in
# general, but for Key Authorization '[t]he "expires" field MUST
# be absent'... then acme-spec gives example with 'expires'
# present... That's confusing!
expires = fields.RFC3339Field('expires', omitempty=True)
@challenges.decoder
def challenges(value): # pylint: disable=missing-docstring,no-self-argument
return tuple(ChallengeBody.from_json(chall) for chall in value)
@property
def resolved_combinations(self):
"""Combinations with challenges instead of indices."""
return tuple(tuple(self.challenges[idx] for idx in combo)
for combo in self.combinations)
@Directory.register
class NewAuthorization(Authorization):
"""New authorization."""
resource_type = 'new-authz'
resource = fields.Resource(resource_type)
class AuthorizationResource(ResourceWithURI):
"""Authorization Resource.
:ivar acme.messages.Authorization body:
:ivar unicode new_cert_uri: URI found in the 'next' ``Link`` header
"""
body = jose.Field('body', decoder=Authorization.from_json)
new_cert_uri = jose.Field('new_cert_uri')
@Directory.register
class CertificateRequest(jose.JSONObjectWithFields):
"""ACME new-cert request.
:ivar acme.jose.util.ComparableX509 csr:
`OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
"""
resource_type = 'new-cert'
resource = fields.Resource(resource_type)
csr = jose.Field('csr', decoder=jose.decode_csr, encoder=jose.encode_csr)
class CertificateResource(ResourceWithURI):
"""Certificate Resource.
:ivar acme.jose.util.ComparableX509 body:
`OpenSSL.crypto.X509` wrapped in `.ComparableX509`
:ivar unicode cert_chain_uri: URI found in the 'up' ``Link`` header
:ivar tuple authzrs: `tuple` of `AuthorizationResource`.
"""
cert_chain_uri = jose.Field('cert_chain_uri')
authzrs = jose.Field('authzrs')
@Directory.register
class Revocation(jose.JSONObjectWithFields):
"""Revocation message.
:ivar .ComparableX509 certificate: `OpenSSL.crypto.X509` wrapped in
`.ComparableX509`
"""
resource_type = 'revoke-cert'
resource = fields.Resource(resource_type)
certificate = jose.Field(
'certificate', decoder=jose.decode_cert, encoder=jose.encode_cert)
| apache-2.0 | 6,495,732,572,067,539,000 | 30.693735 | 82 | 0.651171 | false |
tstenner/bleachbit | tests/TestInit.py | 1 | 1924 | # vim: ts=4:sw=4:expandtab
# coding=utf-8
# BleachBit
# Copyright (C) 2008-2020 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test cases for __init__
"""
from tests import common
import os
import unittest
class InitTestCase(common.BleachbitTestCase):
"""Test cases for __init__"""
def test_expanduser(self):
"""Unit test for function expanduser()"""
# already absolute
test_input = '/home/user/foo'
test_output = os.path.expanduser(test_input)
self.assertEqual(test_input, test_output)
# tilde not at beginning
test_input = '/home/user/~'
test_output = os.path.expanduser(test_input)
self.assertEqual(test_input, test_output)
# should be expanded
if os.name == 'nt':
test_inputs = ('~', r'~\ntuser.dat')
if os.name == 'posix':
test_inputs = ('~', '~/.profile')
for test_input in test_inputs:
test_output = os.path.expanduser(test_input)
self.assertNotEqual(test_input, test_output)
self.assertExists(test_output)
if os.name == 'posix':
self.assertTrue(os.path.samefile(
test_output, os.path.expanduser(test_input)))
def suite():
return unittest.makeSuite(InitTestCase)
| gpl-3.0 | -5,291,252,514,280,642,000 | 30.032258 | 71 | 0.648649 | false |
Marzona/rig-remote | test/test_queuecomms.py | 1 | 2132 | #!/usr/bin/env python
# import modules
import pytest
from rig_remote.queue_comms import QueueComms
from rig_remote.constants import QUEUE_MAX_SIZE
from Queue import Queue, Empty, Full
def test_queued_for_parent1():
qc=QueueComms()
qc.parent_queue.put("2")
qc.parent_queue.get()
assert(qc.queued_for_parent() == False)
def test_queued_for_parent2():
qc=QueueComms()
qc.parent_queue.put("2")
assert(qc.queued_for_parent() == True)
def test_get_from_parent1():
qc=QueueComms()
qc.parent_queue.put("2")
assert(qc.get_from_parent() == "2")
def test_get_from_parent2():
qc=QueueComms()
assert(qc.get_from_parent() == None)
def test_get_from_child1():
qc=QueueComms()
qc.child_queue.put("2")
assert(qc.get_from_child() == "2")
def test_get_from_child2():
qc=QueueComms()
assert(qc.get_from_child() == None)
def test_queued_for_child1():
qc=QueueComms()
qc.child_queue.put("2")
qc.child_queue.get()
assert(qc.queued_for_child() == False)
def test_queued_for_child2():
qc=QueueComms()
qc.child_queue.put("2")
assert(qc.queued_for_child() == True)
def test_queue_max_size_parent():
qc=QueueComms()
for i in range(QUEUE_MAX_SIZE):
qc.send_to_parent(i)
with pytest.raises(Full):
qc.send_to_parent("overflow")
def test_queue_max_size_child1():
qc=QueueComms()
for i in range(QUEUE_MAX_SIZE):
qc.send_to_child(i)
with pytest.raises(Full):
qc.send_to_child("overflow")
def test_queue_value_error_child2():
qc=QueueComms()
with pytest.raises(ValueError):
qc.signal_child("overflow")
def test_queue_max_size_child3():
qc=QueueComms()
for i in range(QUEUE_MAX_SIZE):
qc.signal_child(i)
with pytest.raises(Full):
qc.signal_child(1)
def test_queue_value_error_parent2():
qc=QueueComms()
with pytest.raises(ValueError):
qc.signal_parent("overflow")
def test_queue_max_size_parent3():
qc=QueueComms()
for i in range(QUEUE_MAX_SIZE):
qc.signal_parent(i)
with pytest.raises(Full):
qc.signal_parent(1)
| mit | -5,075,182,294,116,822,000 | 24.082353 | 47 | 0.643996 | false |
GoogleCloudPlatform/functions-framework-python | tests/test_functions/http_flask_render_template/main.py | 1 | 1531 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function used in Worker tests of handling HTTP functions."""
from flask import render_template
def function(request):
"""Test HTTP function whose behavior depends on the given mode.
The function returns a success, a failure, or throws an exception, depending
on the given mode.
Args:
request: The HTTP request which triggered this function. Must contain name
of the requested mode in the 'mode' field in JSON document in request
body.
Returns:
Value and status code defined for the given mode.
Raises:
Exception: Thrown when requested in the incoming mode specification.
"""
if request.args and "message" in request.args:
message = request.args.get("message")
elif request.get_json() and "message" in request.get_json():
message = request.get_json()["message"]
else:
message = "Hello World!"
return render_template("hello.html", name=message)
| apache-2.0 | -5,523,064,124,783,055,000 | 34.604651 | 80 | 0.713259 | false |
dims/cinder | cinder/volume/drivers/vmware/volumeops.py | 1 | 68569 | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements operations on volumes residing on VMware datastores.
"""
from oslo_log import log as logging
from oslo_utils import units
from oslo_vmware import exceptions
from oslo_vmware import pbm
from oslo_vmware import vim_util
import six
from six.moves import urllib
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
LOG = logging.getLogger(__name__)
LINKED_CLONE_TYPE = 'linked'
FULL_CLONE_TYPE = 'full'
def split_datastore_path(datastore_path):
"""Split the datastore path to components.
return the datastore name, relative folder path and the file name
E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns
(datastore1, my_volume/, my_volume.vmdk)
:param datastore_path: Datastore path of a file
:return: Parsed datastore name, relative folder path and file name
"""
splits = datastore_path.split('[', 1)[1].split(']', 1)
datastore_name = None
folder_path = None
file_name = None
if len(splits) == 1:
datastore_name = splits[0]
else:
datastore_name, path = splits
# Path will be of form my_volume/my_volume.vmdk
# we need into my_volumes/ and my_volume.vmdk
splits = path.split('/')
file_name = splits[len(splits) - 1]
folder_path = path[:-len(file_name)]
return (datastore_name.strip(), folder_path.strip(), file_name.strip())
class VirtualDiskPath(object):
"""Class representing paths of files comprising a virtual disk."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
self._descriptor_file_path = "%s%s.vmdk" % (folder_path, disk_name)
self._descriptor_ds_file_path = self.get_datastore_file_path(
ds_name, self._descriptor_file_path)
def get_datastore_file_path(self, ds_name, file_path):
"""Get datastore path corresponding to the given file path.
:param ds_name: name of the datastore containing the file represented
by the given file path
:param file_path: absolute path of the file
:return: datastore file path
"""
return "[%s] %s" % (ds_name, file_path)
def get_descriptor_file_path(self):
"""Get absolute file path of the virtual disk descriptor."""
return self._descriptor_file_path
def get_descriptor_ds_file_path(self):
"""Get datastore file path of the virtual disk descriptor."""
return self._descriptor_ds_file_path
class FlatExtentVirtualDiskPath(VirtualDiskPath):
"""Paths of files in a non-monolithic disk with a single flat extent."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
super(FlatExtentVirtualDiskPath, self).__init__(
ds_name, folder_path, disk_name)
self._flat_extent_file_path = "%s%s-flat.vmdk" % (folder_path,
disk_name)
self._flat_extent_ds_file_path = self.get_datastore_file_path(
ds_name, self._flat_extent_file_path)
def get_flat_extent_file_path(self):
"""Get absolute file path of the flat extent."""
return self._flat_extent_file_path
def get_flat_extent_ds_file_path(self):
"""Get datastore file path of the flat extent."""
return self._flat_extent_ds_file_path
class MonolithicSparseVirtualDiskPath(VirtualDiskPath):
"""Paths of file comprising a monolithic sparse disk."""
pass
class VirtualDiskType(object):
"""Supported virtual disk types."""
EAGER_ZEROED_THICK = "eagerZeroedThick"
PREALLOCATED = "preallocated"
THIN = "thin"
# thick in extra_spec means lazy-zeroed thick disk
EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK,
'thick': PREALLOCATED,
'thin': THIN
}
@staticmethod
def is_valid(extra_spec_disk_type):
"""Check if the given disk type in extra_spec is valid.
:param extra_spec_disk_type: disk type in extra_spec
:return: True if valid
"""
return (extra_spec_disk_type in
VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT)
@staticmethod
def validate(extra_spec_disk_type):
"""Validate the given disk type in extra_spec.
This method throws an instance of InvalidDiskTypeException if the given
disk type is invalid.
:param extra_spec_disk_type: disk type in extra_spec
:raises: InvalidDiskTypeException
"""
if not VirtualDiskType.is_valid(extra_spec_disk_type):
raise vmdk_exceptions.InvalidDiskTypeException(
disk_type=extra_spec_disk_type)
@staticmethod
def get_virtual_disk_type(extra_spec_disk_type):
"""Return disk type corresponding to the extra_spec disk type.
:param extra_spec_disk_type: disk type in extra_spec
:return: virtual disk type
:raises: InvalidDiskTypeException
"""
VirtualDiskType.validate(extra_spec_disk_type)
return (VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[
extra_spec_disk_type])
class VirtualDiskAdapterType(object):
"""Supported virtual disk adapter types."""
LSI_LOGIC = "lsiLogic"
BUS_LOGIC = "busLogic"
LSI_LOGIC_SAS = "lsiLogicsas"
IDE = "ide"
@staticmethod
def is_valid(adapter_type):
"""Check if the given adapter type is valid.
:param adapter_type: adapter type to check
:return: True if valid
"""
return adapter_type in [VirtualDiskAdapterType.LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS,
VirtualDiskAdapterType.IDE]
@staticmethod
def validate(extra_spec_adapter_type):
"""Validate the given adapter type in extra_spec.
This method throws an instance of InvalidAdapterTypeException if the
given adapter type is invalid.
:param extra_spec_adapter_type: adapter type in extra_spec
:raises: InvalidAdapterTypeException
"""
if not VirtualDiskAdapterType.is_valid(extra_spec_adapter_type):
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=extra_spec_adapter_type)
@staticmethod
def get_adapter_type(extra_spec_adapter_type):
"""Get the adapter type to be used in VirtualDiskSpec.
:param extra_spec_adapter_type: adapter type in the extra_spec
:return: adapter type to be used in VirtualDiskSpec
"""
VirtualDiskAdapterType.validate(extra_spec_adapter_type)
# We set the adapter type as lsiLogic for lsiLogicsas since it is not
# supported by VirtualDiskManager APIs. This won't be a problem because
# we attach the virtual disk to the correct controller type and the
# disk adapter type is always resolved using its controller key.
if extra_spec_adapter_type == VirtualDiskAdapterType.LSI_LOGIC_SAS:
return VirtualDiskAdapterType.LSI_LOGIC
return extra_spec_adapter_type
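# Example usage (illustrative sketch only): 'lsiLogicsas' is accepted as a
# valid extra_spec adapter type but is mapped back to 'lsiLogic' for the
# VirtualDiskManager APIs, as described in the comment above.
#
#     VirtualDiskAdapterType.get_adapter_type('lsiLogicsas')  # 'lsiLogic'
#     VirtualDiskAdapterType.get_adapter_type('busLogic')     # 'busLogic'
#     VirtualDiskAdapterType.get_adapter_type('scsi')         # raises InvalidAdapterTypeException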
class ControllerType(object):
"""Encapsulate various controller types."""
LSI_LOGIC = 'VirtualLsiLogicController'
BUS_LOGIC = 'VirtualBusLogicController'
LSI_LOGIC_SAS = 'VirtualLsiLogicSASController'
IDE = 'VirtualIDEController'
CONTROLLER_TYPE_DICT = {
VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS,
VirtualDiskAdapterType.IDE: IDE}
@staticmethod
def get_controller_type(adapter_type):
"""Get the disk controller type based on the given adapter type.
:param adapter_type: disk adapter type
:return: controller type corresponding to the given adapter type
:raises: InvalidAdapterTypeException
"""
if adapter_type in ControllerType.CONTROLLER_TYPE_DICT:
return ControllerType.CONTROLLER_TYPE_DICT[adapter_type]
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=adapter_type)
@staticmethod
def is_scsi_controller(controller_type):
"""Check if the given controller is a SCSI controller.
:param controller_type: controller type
:return: True if the controller is a SCSI controller
"""
return controller_type in [ControllerType.LSI_LOGIC,
ControllerType.BUS_LOGIC,
ControllerType.LSI_LOGIC_SAS]
class VMwareVolumeOps(object):
"""Manages volume operations."""
def __init__(self, session, max_objects):
self._session = session
self._max_objects = max_objects
self._folder_cache = {}
def get_backing(self, name):
"""Get the backing based on name.
:param name: Name of the backing
:return: Managed object reference to the backing
"""
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'VirtualMachine',
self._max_objects)
while retrieve_result:
vms = retrieve_result.objects
for vm in vms:
if vm.propSet[0].val == name:
# We got the result, so cancel further retrieval.
self.cancel_retrieval(retrieve_result)
return vm.obj
# Result not obtained, continue retrieving results.
retrieve_result = self.continue_retrieval(retrieve_result)
LOG.debug("Did not find any backing with name: %s", name)
def delete_backing(self, backing):
"""Delete the backing.
:param backing: Managed object reference to the backing
"""
LOG.debug("Deleting the VM backing: %s.", backing)
task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
backing)
LOG.debug("Initiated deletion of VM backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted the VM backing: %s."), backing)
# TODO(kartikaditya) Keep the methods not specific to volume in
# a different file
def get_host(self, instance):
"""Get host under which instance is present.
:param instance: Managed object reference of the instance VM
:return: Host managing the instance VM
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, instance,
'runtime.host')
def is_host_usable(self, host):
"""Check if the given ESX host is usable.
A host is usable if it is connected to vCenter server and not in
maintenance mode.
:param host: Managed object reference to the ESX host
:return: True if host is usable, False otherwise
"""
runtime_info = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
host,
'runtime')
return (runtime_info.connectionState == 'connected' and
not runtime_info.inMaintenanceMode)
def get_hosts(self):
"""Get all host from the inventory.
:return: All the hosts from the inventory
"""
return self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'HostSystem', self._max_objects)
def continue_retrieval(self, retrieve_result):
"""Continue retrieval of results if necessary.
:param retrieve_result: Result from RetrievePropertiesEx
"""
return self._session.invoke_api(vim_util, 'continue_retrieval',
self._session.vim, retrieve_result)
def cancel_retrieval(self, retrieve_result):
"""Cancel retrieval of results if necessary.
:param retrieve_result: Result from RetrievePropertiesEx
"""
self._session.invoke_api(vim_util, 'cancel_retrieval',
self._session.vim, retrieve_result)
def _is_usable(self, mount_info):
"""Check if a datastore is usable as per the given mount info.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param mount_info: Host mount information
:return: True if datastore is usable
"""
writable = mount_info.accessMode == 'readWrite'
# If mounted attribute is not set, then default is True
mounted = getattr(mount_info, 'mounted', True)
# If accessible attribute is not set, then default is False
accessible = getattr(mount_info, 'accessible', False)
return writable and mounted and accessible
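    # Illustrative example (hypothetical mount_info values, not taken from the
    # driver): a read-only or inaccessible mount is rejected even if mounted.
    #
    #     accessMode='readWrite', mounted=True, accessible=True  -> usable
    #     accessMode='readOnly'                                   -> not usable
    #     'accessible' attribute missing (defaults to False)      -> not usable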
def get_connected_hosts(self, datastore):
"""Get all the hosts to which the datastore is connected and usable.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param datastore: Reference to the datastore entity
:return: List of managed object references of all connected
hosts
"""
summary = self.get_summary(datastore)
if not summary.accessible:
return []
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
if not hasattr(host_mounts, 'DatastoreHostMount'):
return []
connected_hosts = []
for host_mount in host_mounts.DatastoreHostMount:
if self._is_usable(host_mount.mountInfo):
connected_hosts.append(host_mount.key.value)
return connected_hosts
def is_datastore_accessible(self, datastore, host):
"""Check if the datastore is accessible to the given host.
        :param datastore: datastore reference
        :param host: host reference
        :return: True if the datastore is accessible
"""
hosts = self.get_connected_hosts(datastore)
return host.value in hosts
def _in_maintenance(self, summary):
"""Check if a datastore is entering maintenance or in maintenance.
:param summary: Summary information about the datastore
:return: True if the datastore is entering maintenance or in
maintenance
"""
if hasattr(summary, 'maintenanceMode'):
return summary.maintenanceMode in ['enteringMaintenance',
'inMaintenance']
return False
def _is_valid(self, datastore, host):
"""Check if the datastore is valid for the given host.
A datastore is considered valid for a host only if the datastore is
writable, mounted and accessible. Also, the datastore should not be
in maintenance mode.
:param datastore: Reference to the datastore entity
:param host: Reference to the host entity
:return: True if datastore can be used for volume creation
"""
summary = self.get_summary(datastore)
in_maintenance = self._in_maintenance(summary)
if not summary.accessible or in_maintenance:
return False
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
for host_mount in host_mounts.DatastoreHostMount:
if host_mount.key.value == host.value:
return self._is_usable(host_mount.mountInfo)
return False
def get_dss_rp(self, host):
"""Get accessible datastores and resource pool of the host.
:param host: Managed object reference of the host
        :return: Datastores accessible to the host and the resource pool to
                 which the host belongs
"""
props = self._session.invoke_api(vim_util, 'get_object_properties',
self._session.vim, host,
['datastore', 'parent'])
# Get datastores and compute resource or cluster compute resource
datastores = []
compute_resource = None
for elem in props:
for prop in elem.propSet:
if prop.name == 'datastore' and prop.val:
# Consider only if datastores are present under host
datastores = prop.val.ManagedObjectReference
elif prop.name == 'parent':
compute_resource = prop.val
LOG.debug("Datastores attached to host %(host)s are: %(ds)s.",
{'host': host, 'ds': datastores})
        # Filter datastores based on whether they are accessible, mounted and
        # writable
valid_dss = []
for datastore in datastores:
if self._is_valid(datastore, host):
valid_dss.append(datastore)
# Get resource pool from compute resource or cluster compute resource
resource_pool = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
compute_resource,
'resourcePool')
if not valid_dss:
msg = _("There are no valid datastores attached to %s.") % host
LOG.error(msg)
raise exceptions.VimException(msg)
else:
LOG.debug("Valid datastores are: %s", valid_dss)
return (valid_dss, resource_pool)
def _get_parent(self, child, parent_type):
"""Get immediate parent of given type via 'parent' property.
:param child: Child entity reference
:param parent_type: Entity type of the parent
:return: Immediate parent of specific type up the hierarchy via
'parent' property
"""
if not child:
return None
if child._type == parent_type:
return child
parent = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, child, 'parent')
return self._get_parent(parent, parent_type)
def get_dc(self, child):
"""Get parent datacenter up the hierarchy via 'parent' property.
:param child: Reference of the child entity
:return: Parent Datacenter of the param child entity
"""
return self._get_parent(child, 'Datacenter')
def get_vmfolder(self, datacenter):
"""Get the vmFolder.
:param datacenter: Reference to the datacenter entity
:return: vmFolder property of the datacenter
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datacenter,
'vmFolder')
def _get_child_folder(self, parent_folder, child_folder_name):
# Get list of child entities for the parent folder
prop_val = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, parent_folder,
'childEntity')
if prop_val and hasattr(prop_val, 'ManagedObjectReference'):
child_entities = prop_val.ManagedObjectReference
# Return if the child folder with input name is already present
for child_entity in child_entities:
if child_entity._type != 'Folder':
continue
child_entity_name = self.get_entity_name(child_entity)
if (child_entity_name
and (urllib.parse.unquote(child_entity_name)
== child_folder_name)):
LOG.debug("Child folder: %s exists.", child_folder_name)
return child_entity
def create_folder(self, parent_folder, child_folder_name):
"""Creates child folder with given name under the given parent folder.
        The method first checks whether a child folder with the given name
        already exists; if it does, it returns a moref for that folder, else it
        creates the folder and then returns its moref.
:param parent_folder: Reference to the folder entity
:param child_folder_name: Name of the child folder
:return: Reference to the child folder with input name if it already
exists, else create one and return the reference
"""
LOG.debug("Creating folder: %(child_folder_name)s under parent "
"folder: %(parent_folder)s.",
{'child_folder_name': child_folder_name,
'parent_folder': parent_folder})
child_folder = self._get_child_folder(parent_folder, child_folder_name)
if not child_folder:
# Need to create the child folder.
try:
child_folder = self._session.invoke_api(self._session.vim,
'CreateFolder',
parent_folder,
name=child_folder_name)
LOG.debug("Created child folder: %s.", child_folder)
except exceptions.DuplicateName:
# Another thread is trying to create the same folder, ignore
# the exception.
child_folder = self._get_child_folder(parent_folder,
child_folder_name)
return child_folder
def create_vm_inventory_folder(self, datacenter, path_comp):
"""Create and return a VM inventory folder.
This method caches references to inventory folders returned.
:param datacenter: Reference to datacenter
:param path_comp: Path components as a list
"""
LOG.debug("Creating inventory folder: %(path_comp)s under VM folder "
"of datacenter: %(datacenter)s.",
{'path_comp': path_comp,
'datacenter': datacenter})
path = "/" + datacenter.value
parent = self._folder_cache.get(path)
if not parent:
parent = self.get_vmfolder(datacenter)
self._folder_cache[path] = parent
folder = None
for folder_name in path_comp:
path = "/".join([path, folder_name])
folder = self._folder_cache.get(path)
if not folder:
folder = self.create_folder(parent, folder_name)
self._folder_cache[path] = folder
parent = folder
LOG.debug("Inventory folder for path: %(path)s is %(folder)s.",
{'path': path,
'folder': folder})
return folder
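    # Illustrative example (hypothetical values): for a datacenter moref value
    # 'datacenter-2' and path_comp ['OpenStack', 'Project (abc)', 'Volumes'],
    # the folder cache ends up keyed by the cumulative paths
    #     '/datacenter-2'
    #     '/datacenter-2/OpenStack'
    #     '/datacenter-2/OpenStack/Project (abc)'
    #     '/datacenter-2/OpenStack/Project (abc)/Volumes'
    # and the reference of the last (deepest) folder is returned.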
def extend_virtual_disk(self, requested_size_in_gb, path, dc_ref,
eager_zero=False):
"""Extend the virtual disk to the requested size.
:param requested_size_in_gb: Size of the volume in GB
:param path: Datastore path of the virtual disk to extend
:param dc_ref: Reference to datacenter
:param eager_zero: Boolean determining if the free space
is zeroed out
"""
LOG.debug("Extending virtual disk: %(path)s to %(size)s GB.",
{'path': path, 'size': requested_size_in_gb})
diskMgr = self._session.vim.service_content.virtualDiskManager
# VMWare API needs the capacity unit to be in KB, so convert the
# capacity unit from GB to KB.
size_in_kb = requested_size_in_gb * units.Mi
task = self._session.invoke_api(self._session.vim,
"ExtendVirtualDisk_Task",
diskMgr,
name=path,
datacenter=dc_ref,
newCapacityKb=size_in_kb,
eagerZero=eager_zero)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully extended virtual disk: %(path)s to "
"%(size)s GB."),
{'path': path, 'size': requested_size_in_gb})
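    # Example of the GB -> KB conversion used above (units.Mi == 1024 * 1024):
    #     requested_size_in_gb = 2  ->  size_in_kb = 2 * 1048576 = 2097152 KB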
def _create_controller_config_spec(self, adapter_type):
"""Returns config spec for adding a disk controller."""
cf = self._session.vim.client.factory
controller_type = ControllerType.get_controller_type(adapter_type)
controller_device = cf.create('ns0:%s' % controller_type)
controller_device.key = -100
controller_device.busNumber = 0
if ControllerType.is_scsi_controller(controller_type):
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
return controller_spec
def _create_disk_backing(self, disk_type, vmdk_ds_file_path):
"""Creates file backing for virtual disk."""
cf = self._session.vim.client.factory
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == VirtualDiskType.EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == VirtualDiskType.THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = vmdk_ds_file_path or ''
disk_device_bkng.diskMode = 'persistent'
return disk_device_bkng
def _create_virtual_disk_config_spec(self, size_kb, disk_type,
controller_key, vmdk_ds_file_path):
"""Returns config spec for adding a virtual disk."""
cf = self._session.vim.client.factory
disk_device = cf.create('ns0:VirtualDisk')
# disk size should be at least 1024KB
disk_device.capacityInKB = max(units.Ki, int(size_kb))
if controller_key < 0:
disk_device.key = controller_key - 1
else:
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = controller_key
disk_device.backing = self._create_disk_backing(disk_type,
vmdk_ds_file_path)
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
if vmdk_ds_file_path is None:
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
return disk_spec
def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type,
vmdk_ds_file_path=None):
"""Create controller and disk config specs for adding a new disk.
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: Optional datastore file path of an existing
virtual disk. If specified, file backing is
not created for the virtual disk.
:return: list containing controller and disk config specs
"""
controller_spec = None
if adapter_type == 'ide':
# For IDE disks, use one of the default IDE controllers (with keys
# 200 and 201) created as part of backing VM creation.
controller_key = 200
else:
controller_spec = self._create_controller_config_spec(adapter_type)
controller_key = controller_spec.device.key
disk_spec = self._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
vmdk_ds_file_path)
specs = [disk_spec]
if controller_spec is not None:
specs.append(controller_spec)
return specs
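    # Illustrative example (sketch only; 'ops' is assumed to be a
    # VMwareVolumeOps instance): for a SCSI adapter such as 'lsiLogic' the
    # returned list holds both the disk spec and a new controller spec
    # (controller key -100, disk key -101), whereas for 'ide' only the disk
    # spec is returned and the disk attaches to the default controller key 200.
    #
    #     specs = ops._create_specs_for_disk_add(units.Mi, 'thin', 'lsiLogic')
    #     len(specs)  # -> 2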
def _get_extra_config_option_values(self, extra_config):
cf = self._session.vim.client.factory
option_values = []
for key, value in six.iteritems(extra_config):
opt = cf.create('ns0:OptionValue')
opt.key = key
opt.value = value
option_values.append(opt)
return option_values
def _get_create_spec_disk_less(self, name, ds_name, profileId=None,
extra_config=None):
"""Return spec for creating disk-less backing.
:param name: Name of the backing
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID for the backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Spec for creation
"""
cf = self._session.vim.client.factory
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.files = vm_file_info
# Set the hardware version to a compatible version supported by
# vSphere 5.0. This will ensure that the backing VM can be migrated
# without any incompatibility issues in a mixed cluster of ESX hosts
# with versions 5.0 or above.
create_spec.version = "vmx-08"
if profileId:
vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vmProfile.profileId = profileId
create_spec.vmProfile = [vmProfile]
if extra_config:
create_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
return create_spec
def get_create_spec(self, name, size_kb, disk_type, ds_name,
profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Return spec for creating backing with a single disk.
:param name: name of the backing
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param ds_name: datastore name where the disk is to be provisioned
:param profileId: storage profile ID for the backing
:param adapter_type: disk adapter type
:param extra_config: key-value pairs to be written to backing's
extra-config
:return: spec for creation
"""
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
create_spec.deviceChange = self._create_specs_for_disk_add(
size_kb, disk_type, adapter_type)
return create_spec
def _create_backing_int(self, folder, resource_pool, host, create_spec):
"""Helper for create backing methods."""
LOG.debug("Creating volume backing with spec: %s.", create_spec)
task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
folder, config=create_spec,
pool=resource_pool, host=host)
task_info = self._session.wait_for_task(task)
backing = task_info.result
LOG.info(_LI("Successfully created volume backing: %s."), backing)
return backing
def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
host, ds_name, profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Create backing for the volume.
Creates a VM with one VMDK based on the given inputs.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
        :param folder: Folder under which to create the backing
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID to be associated with backing
:param adapter_type: Disk adapter type
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating volume backing with name: %(name)s "
"disk_type: %(disk_type)s size_kb: %(size_kb)s "
"adapter_type: %(adapter_type)s profileId: %(profile)s at "
"folder: %(folder)s resource_pool: %(resource_pool)s "
"host: %(host)s datastore_name: %(ds_name)s.",
{'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
'folder': folder, 'resource_pool': resource_pool,
'ds_name': ds_name, 'profile': profileId, 'host': host,
'adapter_type': adapter_type})
create_spec = self.get_create_spec(
name, size_kb, disk_type, ds_name, profileId=profileId,
adapter_type=adapter_type, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def create_backing_disk_less(self, name, folder, resource_pool,
host, ds_name, profileId=None,
extra_config=None):
"""Create disk-less volume backing.
        This type of backing is useful for creating a volume from an image. The
        image downloaded from the image service can be copied to a virtual
        disk of the desired provisioning type and added to the backing VM.
:param name: Name of the backing
:param folder: Folder where the backing is created
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Name of the datastore used for VM storage
:param profileId: Storage profile ID to be associated with backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating disk-less volume backing with name: %(name)s "
"profileId: %(profile)s at folder: %(folder)s "
"resource pool: %(resource_pool)s host: %(host)s "
"datastore_name: %(ds_name)s.",
{'name': name, 'profile': profileId, 'folder': folder,
'resource_pool': resource_pool, 'host': host,
'ds_name': ds_name})
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def get_datastore(self, backing):
"""Get datastore where the backing resides.
:param backing: Reference to the backing
:return: Datastore reference to which the backing belongs
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'datastore').ManagedObjectReference[0]
def get_summary(self, datastore):
"""Get datastore summary.
:param datastore: Reference to the datastore
:return: 'summary' property of the datastore
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'summary')
def _create_relocate_spec_disk_locator(self, datastore, disk_type,
disk_device):
"""Creates spec for disk type conversion during relocate."""
cf = self._session.vim.client.factory
disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator")
disk_locator.datastore = datastore
disk_locator.diskId = disk_device.key
disk_locator.diskBackingInfo = self._create_disk_backing(disk_type,
None)
return disk_locator
def _get_relocate_spec(self, datastore, resource_pool, host,
disk_move_type, disk_type=None, disk_device=None):
"""Return spec for relocating volume backing.
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_move_type: Disk move type option
:param disk_type: Destination disk type
:param disk_device: Virtual device corresponding to the disk
:return: Spec for relocation
"""
cf = self._session.vim.client.factory
relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec')
relocate_spec.datastore = datastore
relocate_spec.pool = resource_pool
relocate_spec.host = host
relocate_spec.diskMoveType = disk_move_type
if disk_type is not None and disk_device is not None:
disk_locator = self._create_relocate_spec_disk_locator(datastore,
disk_type,
disk_device)
relocate_spec.disk = [disk_locator]
LOG.debug("Spec for relocating the backing: %s.", relocate_spec)
return relocate_spec
def relocate_backing(
self, backing, datastore, resource_pool, host, disk_type=None):
"""Relocates backing to the input datastore and resource pool.
The implementation uses moveAllDiskBackingsAndAllowSharing disk move
type.
:param backing: Reference to the backing
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_type: destination disk type
"""
LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s with destination disk type: "
"%(disk_type)s.",
{'backing': backing,
'ds': datastore,
'rp': resource_pool,
'disk_type': disk_type})
# Relocate the volume backing
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
disk_device = None
if disk_type is not None:
disk_device = self._get_disk_device(backing)
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
backing, spec=relocate_spec)
LOG.debug("Initiated relocation of volume backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully relocated volume backing: %(backing)s "
"to datastore: %(ds)s and resource pool: %(rp)s."),
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
def move_backing_to_folder(self, backing, folder):
"""Move the volume backing to the folder.
:param backing: Reference to the backing
:param folder: Reference to the folder
"""
LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.",
{'backing': backing, 'fol': folder})
task = self._session.invoke_api(self._session.vim,
'MoveIntoFolder_Task', folder,
list=[backing])
LOG.debug("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s.", {'backing': backing, 'fol': folder})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully moved volume "
"backing: %(backing)s into the "
"folder: %(fol)s."), {'backing': backing, 'fol': folder})
def create_snapshot(self, backing, name, description, quiesce=False):
"""Create snapshot of the backing with given name and description.
:param backing: Reference to the backing entity
:param name: Snapshot name
:param description: Snapshot description
:param quiesce: Whether to quiesce the backing when taking snapshot
:return: Created snapshot entity reference
"""
LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s.",
{'backing': backing, 'name': name})
task = self._session.invoke_api(self._session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=description,
memory=False, quiesce=quiesce)
LOG.debug("Initiated snapshot of volume backing: %(backing)s "
"named: %(name)s.", {'backing': backing, 'name': name})
task_info = self._session.wait_for_task(task)
snapshot = task_info.result
LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
"backing: %(backing)s."),
{'snap': snapshot, 'backing': backing})
return snapshot
@staticmethod
def _get_snapshot_from_tree(name, root):
"""Get snapshot by name from the snapshot tree root.
:param name: Snapshot name
:param root: Current root node in the snapshot tree
        :return: Snapshot with the given name if found in the tree, else None
"""
if not root:
return None
if root.name == name:
return root.snapshot
if (not hasattr(root, 'childSnapshotList') or
not root.childSnapshotList):
            # When root does not have children, the childSnapshotList attr
            # is sometimes missing, so add an additional check.
return None
for node in root.childSnapshotList:
snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
if snapshot:
return snapshot
def get_snapshot(self, backing, name):
"""Get snapshot of the backing with given name.
:param backing: Reference to the backing entity
:param name: Snapshot name
:return: Snapshot entity of the backing with given name
"""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if not snapshot or not snapshot.rootSnapshotList:
return None
for root in snapshot.rootSnapshotList:
return VMwareVolumeOps._get_snapshot_from_tree(name, root)
def snapshot_exists(self, backing):
"""Check if the given backing contains snapshots."""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if snapshot is None or snapshot.rootSnapshotList is None:
return False
return len(snapshot.rootSnapshotList) != 0
def delete_snapshot(self, backing, name):
"""Delete a given snapshot from volume backing.
:param backing: Reference to the backing entity
:param name: Snapshot name
"""
LOG.debug("Deleting the snapshot: %(name)s from backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
"%(backing)s. Need not delete anything."),
{'name': name, 'backing': backing})
return
task = self._session.invoke_api(self._session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
LOG.debug("Initiated snapshot: %(name)s deletion for backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: "
"%(backing)s."), {'backing': backing, 'name': name})
def _get_folder(self, backing):
"""Get parent folder of the backing.
:param backing: Reference to the backing entity
:return: Reference to parent folder of the backing entity
"""
return self._get_parent(backing, 'Folder')
def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
disk_type, host=None, resource_pool=None,
extra_config=None):
"""Get the clone spec.
:param datastore: Reference to datastore
:param disk_move_type: Disk move type
:param snapshot: Reference to snapshot
:param backing: Source backing VM
:param disk_type: Disk type of clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Clone spec
"""
if disk_type is not None:
disk_device = self._get_disk_device(backing)
else:
disk_device = None
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
cf = self._session.vim.client.factory
clone_spec = cf.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = relocate_spec
clone_spec.powerOn = False
clone_spec.template = False
clone_spec.snapshot = snapshot
if extra_config:
config_spec = cf.create('ns0:VirtualMachineConfigSpec')
config_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
clone_spec.config = config_spec
LOG.debug("Spec for cloning the backing: %s.", clone_spec)
return clone_spec
def clone_backing(self, name, backing, snapshot, clone_type, datastore,
disk_type=None, host=None, resource_pool=None,
extra_config=None):
"""Clone backing.
If the clone_type is 'full', then a full clone of the source volume
backing will be created. Else, if it is 'linked', then a linked clone
of the source volume backing will be created.
:param name: Name for the clone
:param backing: Reference to the backing entity
:param snapshot: Snapshot point from which the clone should be done
:param clone_type: Whether a full clone or linked clone is to be made
:param datastore: Reference to the datastore entity
:param disk_type: Disk type of the clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
"""
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
"resource pool: %(resource_pool)s, host: %(host)s, "
"datastore: %(ds)s with disk type: %(disk_type)s.",
{'back': backing, 'name': name, 'type': clone_type,
'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
'host': host, 'resource_pool': resource_pool})
folder = self._get_folder(backing)
if clone_type == LINKED_CLONE_TYPE:
disk_move_type = 'createNewChildDiskBacking'
else:
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
clone_spec = self._get_clone_spec(
datastore, disk_move_type, snapshot, backing, disk_type, host=host,
resource_pool=resource_pool, extra_config=extra_config)
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)
LOG.debug("Initiated clone of backing: %s.", name)
task_info = self._session.wait_for_task(task)
new_backing = task_info.result
LOG.info(_LI("Successfully created clone: %s."), new_backing)
return new_backing
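    # Illustrative note: the clone_type to diskMoveType mapping used above is
    #     LINKED_CLONE_TYPE            -> 'createNewChildDiskBacking'
    #     any other type (full clone)  -> 'moveAllDiskBackingsAndDisallowSharing'
    # i.e. a linked clone shares the parent's disk backings through a child
    # delta disk, while a full clone copies them to the target datastore.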
def _reconfigure_backing(self, backing, reconfig_spec):
"""Reconfigure backing VM with the given spec."""
LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
{'backing': backing,
'spec': reconfig_spec})
reconfig_task = self._session.invoke_api(self._session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
LOG.debug("Task: %s created for reconfiguring backing VM.",
reconfig_task)
self._session.wait_for_task(reconfig_task)
def attach_disk_to_backing(self, backing, size_in_kb, disk_type,
adapter_type, vmdk_ds_file_path):
"""Attach an existing virtual disk to the backing VM.
:param backing: reference to the backing VM
:param size_in_kb: disk size in KB
:param disk_type: virtual disk type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: datastore file path of the virtual disk to
be attached
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: "
"%(path)s with size (KB): %(size)d and adapter type: "
"%(adapter_type)s.",
{'backing': backing,
'path': vmdk_ds_file_path,
'size': size_in_kb,
'adapter_type': adapter_type})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
specs = self._create_specs_for_disk_add(size_in_kb,
disk_type,
adapter_type,
vmdk_ds_file_path)
reconfig_spec.deviceChange = specs
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %s reconfigured with new disk.", backing)
def rename_backing(self, backing, new_name):
"""Rename backing VM.
:param backing: VM to be renamed
:param new_name: new VM name
"""
LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
rename_task = self._session.invoke_api(self._session.vim,
"Rename_Task",
backing,
newName=new_name)
LOG.debug("Task: %s created for renaming VM.", rename_task)
self._session.wait_for_task(rename_task)
LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
def change_backing_profile(self, backing, profile_id):
"""Change storage profile of the backing VM.
The current profile is removed if the new profile is None.
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:"
" %(profile)s.",
{'backing': backing,
'profile': profile_id})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
if profile_id is None:
vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec')
vm_profile.dynamicType = 'profile'
else:
vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vm_profile.profileId = profile_id.uniqueId
reconfig_spec.vmProfile = [vm_profile]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new profile: "
"%(profile)s.",
{'backing': backing,
'profile': profile_id})
def update_backing_disk_uuid(self, backing, disk_uuid):
"""Update backing VM's disk UUID.
:param backing: Reference to backing VM
:param disk_uuid: New disk UUID
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID "
"to: %(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
disk_device = self._get_disk_device(backing)
disk_device.backing.uuid = disk_uuid
cf = self._session.vim.client.factory
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.device = disk_device
disk_spec.operation = 'edit'
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
reconfig_spec.deviceChange = [disk_spec]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: "
"%(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
def delete_file(self, file_path, datacenter=None):
"""Delete file or folder on the datastore.
        :param file_path: Datastore path of the file or folder
        :param datacenter: Reference to the datacenter containing the file
"""
LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.",
{'file': file_path, 'dc': datacenter})
fileManager = self._session.vim.service_content.fileManager
task = self._session.invoke_api(self._session.vim,
'DeleteDatastoreFile_Task',
fileManager,
name=file_path,
datacenter=datacenter)
LOG.debug("Initiated deletion via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted file: %s."), file_path)
def create_datastore_folder(self, ds_name, folder_path, datacenter):
"""Creates a datastore folder.
This method returns silently if the folder already exists.
:param ds_name: datastore name
:param folder_path: path of folder to create
:param datacenter: datacenter of target datastore
"""
fileManager = self._session.vim.service_content.fileManager
ds_folder_path = "[%s] %s" % (ds_name, folder_path)
LOG.debug("Creating datastore folder: %s.", ds_folder_path)
try:
self._session.invoke_api(self._session.vim,
'MakeDirectory',
fileManager,
name=ds_folder_path,
datacenter=datacenter)
LOG.info(_LI("Created datastore folder: %s."), folder_path)
except exceptions.FileAlreadyExistsException:
LOG.debug("Datastore folder: %s already exists.", folder_path)
def get_path_name(self, backing):
"""Get path name of the backing.
:param backing: Reference to the backing entity
:return: Path name of the backing
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'config.files').vmPathName
def get_entity_name(self, entity):
"""Get name of the managed entity.
:param entity: Reference to the entity
:return: Name of the managed entity
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, entity, 'name')
def _get_disk_device(self, backing):
"""Get the virtual device corresponding to disk."""
hardware_devices = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
backing,
'config.hardware.device')
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
return device
LOG.error(_LE("Virtual disk device of "
"backing: %s not found."), backing)
raise vmdk_exceptions.VirtualDiskNotFoundException()
def get_vmdk_path(self, backing):
"""Get the vmdk file name of the backing.
The vmdk file path of the backing returned is of the form:
"[datastore1] my_folder/my_vm.vmdk"
:param backing: Reference to the backing
:return: VMDK file path of the backing
"""
disk_device = self._get_disk_device(backing)
backing = disk_device.backing
if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo":
msg = _("Invalid disk backing: %s.") % backing.__class__.__name__
LOG.error(msg)
raise AssertionError(msg)
return backing.fileName
def get_disk_size(self, backing):
"""Get disk size of the backing.
:param backing: backing VM reference
:return: disk size in bytes
"""
disk_device = self._get_disk_device(backing)
return disk_device.capacityInKB * units.Ki
def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type,
disk_type):
"""Return spec for file-backed virtual disk creation."""
cf = self._session.vim.client.factory
spec = cf.create('ns0:FileBackedVirtualDiskSpec')
spec.capacityKb = size_in_kb
spec.adapterType = VirtualDiskAdapterType.get_adapter_type(
adapter_type)
spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type)
return spec
def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb,
adapter_type='busLogic', disk_type='preallocated'):
"""Create virtual disk with the given settings.
:param dc_ref: datacenter reference
:param vmdk_ds_file_path: datastore file path of the virtual disk
:param size_in_kb: disk size in KB
:param adapter_type: disk adapter type
:param disk_type: vmdk type
"""
virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb,
adapter_type,
disk_type)
LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec)
disk_manager = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CreateVirtualDisk_Task',
disk_manager,
name=vmdk_ds_file_path,
datacenter=dc_ref,
spec=virtual_disk_spec)
LOG.debug("Task: %s created for virtual disk creation.", task)
self._session.wait_for_task(task)
LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec)
def create_flat_extent_virtual_disk_descriptor(
self, dc_ref, path, size_in_kb, adapter_type, disk_type):
"""Create descriptor for a single flat extent virtual disk.
To create the descriptor, we create a virtual disk and delete its flat
extent.
:param dc_ref: reference to the datacenter
:param path: descriptor datastore file path
:param size_in_kb: size of the virtual disk in KB
:param adapter_type: virtual disk adapter type
:param disk_type: type of the virtual disk
"""
LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, "
"adapter_type: %(adapter_type)s and disk_type: "
"%(disk_type)s.",
{'path': path.get_descriptor_ds_file_path(),
'size': size_in_kb,
'adapter_type': adapter_type,
'disk_type': disk_type
})
self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(),
size_in_kb, adapter_type, disk_type)
self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref)
LOG.debug("Created descriptor: %s.",
path.get_descriptor_ds_file_path())
def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path,
dest_vmdk_file_path, dest_dc_ref=None):
"""Copy contents of the src vmdk file to dest vmdk file.
:param src_dc_ref: Reference to datacenter containing src datastore
:param src_vmdk_file_path: Source vmdk file path
:param dest_vmdk_file_path: Destination vmdk file path
:param dest_dc_ref: Reference to datacenter of dest datastore.
If unspecified, source datacenter is used.
"""
LOG.debug('Copying disk: %(src)s to %(dest)s.',
{'src': src_vmdk_file_path,
'dest': dest_vmdk_file_path})
dest_dc_ref = dest_dc_ref or src_dc_ref
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CopyVirtualDisk_Task',
diskMgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=src_dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dest_dc_ref,
force=True)
LOG.debug("Initiated copying disk data via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s."),
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
def delete_vmdk_file(self, vmdk_file_path, dc_ref):
"""Delete given vmdk files.
:param vmdk_file_path: VMDK file path to be deleted
:param dc_ref: Reference to datacenter that contains this VMDK file
"""
LOG.debug("Deleting vmdk file: %s.", vmdk_file_path)
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'DeleteVirtualDisk_Task',
diskMgr,
name=vmdk_file_path,
datacenter=dc_ref)
LOG.debug("Initiated deleting vmdk file via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path)
def get_profile(self, backing):
"""Query storage profile associated with the given backing.
:param backing: backing reference
:return: profile name
"""
profile_ids = pbm.get_profiles(self._session, backing)
if profile_ids:
return pbm.get_profiles_by_ids(self._session, profile_ids)[0].name
def _get_all_clusters(self):
clusters = {}
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'ClusterComputeResource',
self._max_objects)
while retrieve_result:
if retrieve_result.objects:
for cluster in retrieve_result.objects:
name = urllib.parse.unquote(cluster.propSet[0].val)
clusters[name] = cluster.obj
retrieve_result = self.continue_retrieval(retrieve_result)
return clusters
def get_cluster_refs(self, names):
"""Get references to given clusters.
:param names: list of cluster names
:return: Dictionary of cluster names to references
"""
clusters_ref = {}
clusters = self._get_all_clusters()
for name in names:
if name not in clusters:
LOG.error(_LE("Compute cluster: %s not found."), name)
raise vmdk_exceptions.ClusterNotFoundException(cluster=name)
clusters_ref[name] = clusters[name]
return clusters_ref
def get_cluster_hosts(self, cluster):
"""Get hosts in the given cluster.
:param cluster: cluster reference
:return: references to hosts in the cluster
"""
hosts = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
cluster,
'host')
host_refs = []
if hosts and hosts.ManagedObjectReference:
host_refs.extend(hosts.ManagedObjectReference)
return host_refs
| apache-2.0 | 7,185,271,924,319,432,000 | 42.646722 | 79 | 0.567895 | false |
bireme/isisdm | isis/model/tests/test_ordered.py | 1 | 2207 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
In Python the attributes of a class are stored in a `dict`, so their
declaration order is not preserved. Normally the order is not really important.
Note in the example below that the list returned by `dir(l)` does not preserve
the order in which the attributes were declared in the `LivroSimples` class::
>>> class LivroSimples(object):
... titulo = u''
... isbn = u''
... autores = u''
>>> l = LivroSimples()
>>> dir(l) #doctest: +ELLIPSIS
[...'autores', 'isbn', 'titulo'...]
To generate forms automatically from the class, it is desirable to respect the
order in which the fields were declared. Using descriptors and a metaclass,
it is possible to preserve this order.
>>> class Livro(OrderedModel):
... titulo = OrderedProperty()
... isbn = OrderedProperty()
... autores = OrderedProperty()
>>> l2 = Livro()
>>> l2.titulo = 'O Alienista'
>>> l2.titulo
'O Alienista'
>>> list(l2)
['titulo', 'isbn', 'autores']
>>> for campo in l2: print campo
titulo
isbn
autores
>>> l3 = Livro()
>>> l3.titulo
Traceback (most recent call last):
...
AttributeError: 'Livro' object has no attribute 'titulo'
>>> l4 = Livro(titulo=u'Alice', autores=[u'Carroll', u'Tenniel'], isbn=u'9781234567890')
>>> for campo, valor in l4.iteritems():
... print '%-8s: %s' % (campo, valor)
titulo : Alice
isbn : 9781234567890
autores : [u'Carroll', u'Tenniel']
The descriptors have an `order` attribute that is initialized from a counter in
the `OrderedProperty` class, incremented for each new instance. The metaclass
uses this `order` attribute to sort a list of the field names.
>>> class Bicicleta(OrderedModel):
... rodas = OrderedProperty()
... aro = OrderedProperty()
... cor = OrderedProperty()
...
>>> bike = Bicicleta()
>>> bike.rodas = 2
>>> bike.aro = 26
>>> bike.cor = u'preto'
...
"""
from isis.model.ordered import OrderedModel, OrderedProperty
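# A minimal sketch of how the ordering could be implemented (an assumption for
# illustration only; the real classes are imported from isis.model.ordered):
#
#     class OrderedProperty(object):
#         _count = 0
#         def __init__(self):
#             self.order = OrderedProperty._count
#             OrderedProperty._count += 1
#
# The metaclass behind OrderedModel can then sort the class attributes by this
# `order` value to recover the declaration order exercised in the doctests.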
def test():
import doctest
doctest.testmod()
if __name__=='__main__':
test() | gpl-2.0 | 2,476,869,298,126,630,000 | 28.621622 | 92 | 0.612049 | false |
willblev/RNA_TwiZe | rna_twize.py | 1 | 12512 | import Tkinter
import tkFileDialog
import tkMessageBox
from Tkinter import *
import os
import sys
from math import factorial
class Unbuffered(object):
"""Attempts to create an unbuffered STDOUT"""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
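# With the wrapper above, anything written to sys.stdout is flushed right away,
# e.g. print "Fetching PDB files..." appears immediately instead of waiting for
# the stream buffer to fill.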
files_list=[]
class RNA_Twilight_Zone_Curve(Tkinter.Frame):
"""Tkinter GUI that lets a user select PDB files with RNA structures and creates a Twilight-Zone curve."""
def open_pdb_files(self):
"""Allows the user to select multiple PDB files from a Tkinter prompt"""
if len(self.display_list)>0:
answer = tkMessageBox.askokcancel(message = "Are you sure you want to load new PDB files? Current workspace will be lost.")
if not answer:
return
else:
del files_list[:]
print "#### Started a new project ####"
self.display_list=[]
list_filename_paths = tkFileDialog.askopenfilenames(parent=root,title="Select multiple files (by holding SHIFT or CTRL).", filetypes=[("PDB files","*.pdb"),("All files","*")] )
if len(list_filename_paths)==1:
tkMessageBox.showerror("Too Few PDB Files!","You must select at least two PDB files.")
return
elif len(list_filename_paths)==0:
return
for each_file in list_filename_paths:
filename=os.path.basename(each_file)[:-4]
print >> sys.stderr, "Loaded %s"% filename
if each_file not in files_list: # could use a set to avoid redundancies
files_list.append(each_file)
if filename not in self.display_list:
self.display_list.append(filename)
#Sort the list by id
self.display_list.sort(key=lambda x: x)
#Add the identifiers to the workspace list
self.pdb_id_listbox.delete(0, Tkinter.END)
index = 1
for record in self.display_list:
self.pdb_id_listbox.insert(index, record.upper())
index+=1
print "Loaded %d PDB files."%len(self.display_list)
self.current_SeqRecord = None
print >> sys.stderr, "Locations of PDB files:"
for fils_paths in files_list:
print >> sys.stderr, fils_paths
def open_list_file(self): #Opens a list file and gets each ID
"""Opens a prompt that allows the user to select a text file containing a list of PDB IDs, which is then used to download the PDB files if the do not already exist."""
if len(self.display_list)>0:
answer = tkMessageBox.askokcancel(message = "Are you sure you want to load new PDB files? Current workspace will be lost.")
if answer is False:
return
else:
del files_list[:]
print "#### Started a new project ####"
self.display_list=[]
list_filename_path = tkFileDialog.askopenfilename( title="Select a list of PDB IDs.", filetypes=[("Text files","*.txt"),("Text files","*.tbl"),("Text files","*.tsv"),("Text files","*.csv"),("All files","*")] )
if list_filename_path=="":
return
self.display_list = []
just_path=os.path.dirname(list_filename_path)
new_dir_name=os.path.join(just_path,os.path.basename(list_filename_path)+"_pdb_files")
if not os.path.exists(new_dir_name):
os.makedirs(os.path.join(just_path,os.path.basename(list_filename_path)+"_pdb_files"))
#open list and parse PDB IDs
handle = open(list_filename_path,"r")
entire_file=''
print >> sys.stderr, "Fetching PDB files..."
for line in handle:
entire_file+=line
if "," in entire_file:
pdb_id_list=[x.strip() for x in entire_file.split(',')]
elif ";" in entire_file:
pdb_id_list=[x.strip() for x in entire_file.split(';')]
else:
pdb_id_list=[x.strip() for x in entire_file.split()]
for pdb_id in pdb_id_list:
if pdb_id[:4].upper() not in self.display_list:
self.display_list.append(pdb_id[:4].upper())
self.display_list.sort(key=lambda x: x)
#Add the identifiers to the list
self.pdb_id_listbox.delete(0, Tkinter.END)
index = 1
answer = tkMessageBox.askokcancel(message = "Download %d PDB files? This will probably take between %0.2f and %0.2f minutes. This window will close when process has completed." % (len(self.display_list), len(self.display_list)*0.03,len(self.display_list)*0.07))
if answer is False:
return
from pdb_getter import get_pdb_structure
for record in self.display_list:
self.pdb_id_listbox.insert(index, record.upper())
files_list.append(get_pdb_structure(record,new_dir_name))
index+=1
handle.close()
print "Loaded %d PDB files." % (len(self.display_list))
self.current_SeqRecord = None
print >> sys.stderr, "Locations of PDB files:"
for fils in files_list:
print >> sys.stderr, fils
print "You may now run an analysis with 'File' >> 'Run Analysis'."
def open_previous_files(self):
"""Allows the user to select files from previously running an analysis."""
if len(self.display_list)>0:
answer = tkMessageBox.askokcancel(message = "Are you sure you want to load new PDB files? Current workspace will be lost.")
if answer is False:
return
else:
del files_list[:]
print "#### Started a new project ####"
self.display_list=[]
list_filename_paths = tkFileDialog.askopenfilenames(parent=root,title="Select multiple files (by holding SHIFT or CTRL).", filetypes=[("PDB files","SARA_*.pdb"),("All files","*")] )
if len(list_filename_paths)==0:
return
for each_file in list_filename_paths:
filename=os.path.basename(each_file)[5:-4]
print >> sys.stderr, "Loaded %s"% filename
if each_file not in files_list:
files_list.append(each_file)
if filename not in self.display_list:
self.display_list.append(filename)
#Sort the list by id
self.display_list.sort(key=lambda x: x)
#Add the identifiers to the list
self.pdb_id_listbox.delete(0, Tkinter.END)
index = 1
for record in self.display_list:
self.pdb_id_listbox.insert(index, record.upper())
index+=1
print "Loaded %d files from previous analysis." % len(files_list)
def run_analysis(self):
"""Using the previously selected PDB files, filters out RNA structures that are not identical and between 20 and 500 bases long. This filtered list is then compared with SARA"""
if len(files_list)>0:
runtime_approx=factorial(len(files_list))/(factorial(len(files_list)-2)*factorial(2))
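			# C(n, 2) = n * (n - 1) / 2 pairwise comparisons,
			# e.g. 10 loaded files -> 45 SARA runs.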
answer = tkMessageBox.askokcancel("Run Analysis","The analysis will probably take between %0.2f and %0.2f minutes to run these comparisons. Do you want to continue now? This window will close when process has completed." % (runtime_approx*0.04,runtime_approx*0.09))
if answer is False:
return
from pdb_comparer import compare_files
refresh_listbox=compare_files(files_list)
print "The analysis has created %d pairwise comparison files."% len(refresh_listbox)
self.pdb_id_listbox.delete(0, Tkinter.END)
index = 1
for record in refresh_listbox:
self.pdb_id_listbox.insert(index, record.upper())
index+=1
self.current_SeqRecord = None
print "You may now plot your results with 'File' >> 'Make Plot'."
else:
tkMessageBox.showerror("No files loaded!","There are currently no files loaded. First you should select PDB files.")
def make_plot(self):
"""Uses the files created by the pairwise alignments to make a plot."""
if len(files_list)>0:
from make_twize_graph import make_twilight_zone_curve
make_twilight_zone_curve(os.path.dirname(files_list[0]))
else:
tkMessageBox.showerror("No files loaded!","There are currently no files loaded. First import PDBs, then run then analysis before you try to plot.")
def show_help(self):
"""Displays the help dialogue, and provides extra information by searching for and opening the README file."""
answer=tkMessageBox.askokcancel(title="Help", message="Welcome to RNA_Twize. The basic flow of the program is as follows: \n 1. Open several PDB files\n 2. Run the analysis\n 3. Plot your results\n 4. Save your plot \nFor more detailed information, please see the README.txt file included with this package.\n\n Open README.txt now?")
if answer:
where_are_we = os.path.dirname(os.path.realpath(__file__))
try:
open_help_string="gedit %s &" % (os.path.join(os.path.dirname(where_are_we),"README.txt"))
os.system(open_help_string)
except:
open_help_string="more %s " % (os.path.join(os.path.dirname(where_are_we),"README.txt"))
os.system(open_help_string)
def show_about(self):
"""Displays a short message from the creators of the program"""
tkMessageBox.showinfo(title="About", message="This program was written by Andres Lanzos Camionai and Will Blevins in 2014. We would like to thank Emidio Capriotti for creating SARA, and Javier Garcia Garcia for providing us with useful templates for our Tkinter GUI.")
def create_left_frame(self):
self.left_frame = Tkinter.LabelFrame(self, text="Workspace List", padx=5, pady=5)
self.create_pdb_id_listbox()
self.left_frame.grid(row=0, column=0, sticky=Tkinter.W+Tkinter.E+Tkinter.N+Tkinter.S)
def create_pdb_id_listbox(self):
"""Creates a frame that contains a listbox with a scroll bar"""
frame = Tkinter.Frame(self.left_frame)
scrollbar = Tkinter.Scrollbar(frame, orient=Tkinter.VERTICAL)
self.pdb_id_listbox = Tkinter.Listbox(frame, selectmode=Tkinter.SINGLE, height=20, yscrollcommand = scrollbar.set)
scrollbar.config(command=self.pdb_id_listbox.yview)
scrollbar.pack( side=Tkinter.RIGHT, fill=Tkinter.Y)
self.pdb_id_listbox.pack( side=Tkinter.LEFT, expand=True, fill=Tkinter.BOTH)
frame.pack( fill=Tkinter.BOTH )
def create_right_frame(self):
"""Makes a tkinter frame"""
self.text_frame = Tkinter.LabelFrame(self, text="Program Feedback", width=400, padx=5, pady=5)
self.text_frame.grid(row=0, column=2, sticky=Tkinter.W)
self.right_frame = Tkinter.Frame(self.text_frame, borderwidth=5)
self.right_frame.grid()
def create_feedback_label(self):
"""A label that scrapes STDOUT and prints it in a feedback window"""
class IORedirector(object):
def __init__(self,TEXT_INFO):
self.TEXT_INFO = TEXT_INFO
class StdoutRedirector(IORedirector):
def write(self,str):
self.TEXT_INFO.config(text=self.TEXT_INFO.cget('text') + str)
self.TEXT_INFO = Label(self.right_frame, height=20, width=70, bg="grey",borderwidth=5, relief=RIDGE)
self.TEXT_INFO.grid(row=1, column=1)
sys.stdout = StdoutRedirector(self.TEXT_INFO)
def quit(self):
if tkMessageBox.askyesno("Quit","Are you sure you want to exit?"):
Tkinter.Frame.quit(self)
exit(0)
#CREATE THE FILEMENU
def create_menu(self):
self.menubar = Tkinter.Menu(self)
filemenu = Tkinter.Menu(self.menubar)
filemenu.add_command(label="Open PDB Files", command=self.open_pdb_files)
filemenu.add_command(label="Open List Of PDBs", command=self.open_list_file)
filemenu.add_command(label="Open Previous Analysis", command=self.open_previous_files)
filemenu.add_separator()
filemenu.add_command(label="Run Analysis", command=self.run_analysis)
filemenu.add_separator()
filemenu.add_command(label="Make Plot", command=self.make_plot)
filemenu.add_separator()
filemenu.add_command(label="QUIT", command=self.quit)
#CREATE THE HELP MENU
helpmenu = Tkinter.Menu(self.menubar)
helpmenu.add_command(label="Help", command=self.show_help)
helpmenu.add_command(label="About", command=self.show_about)
self.menubar.add_cascade(label="File", menu=filemenu)
self.menubar.add_cascade(label="Help", menu=helpmenu)
self.master.config(menu=self.menubar)
def createWidgets(self):
self.create_menu()
self.create_left_frame()
self.create_right_frame()
self.create_feedback_label()
self.grid(row=0)
def __init__(self, master=None, **kwargs):
Tkinter.Frame.__init__(self, master, **kwargs)
self.master.wm_title("RNA TwiZe: Twilight-Zone Curve Maker")
self.master.resizable(width=False, height=False)
#DEFINE ATTRIBUTES
self.display_list = []
self.pdb_id_listbox = None
self.menubar = None
self.current_SeqRecord = None
self.sequence_text = None
self.createWidgets()
"""Makes the GUI pop up in the middle of the screen"""
root = Tkinter.Tk()
app = RNA_Twilight_Zone_Curve(master=root,padx=10, pady=10)
#make screen dimensions work
w = 800
h = 380
# get screen width and height
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
# calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
print "Welcome to RNA_TwiZe. Open files using the toolbar menu to begin."
app.mainloop()
| gpl-3.0 | -2,537,356,710,290,258,400 | 39.7557 | 352 | 0.706682 | false |
nriley/NewsBlur | apps/rss_feeds/icon_importer.py | 2 | 13539 | import urllib2
import lxml.html
import numpy
import scipy
import scipy.misc
import scipy.cluster
import urlparse
import struct
import operator
import gzip
import datetime
import requests
import httplib
from PIL import BmpImagePlugin, PngImagePlugin, Image
from socket import error as SocketError
from boto.s3.key import Key
from StringIO import StringIO
from django.conf import settings
from apps.rss_feeds.models import MFeedPage, MFeedIcon
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
from requests.packages.urllib3.exceptions import LocationParseError
class IconImporter(object):
def __init__(self, feed, page_data=None, force=False):
self.feed = feed
self.force = force
self.page_data = page_data
self.feed_icon = MFeedIcon.get_feed(feed_id=self.feed.pk)
def save(self):
if not self.force and self.feed.favicon_not_found:
# print 'Not found, skipping...'
return
if (
not self.force
and not self.feed.favicon_not_found
and self.feed_icon.icon_url
and self.feed.s3_icon
):
# print 'Found, but skipping...'
return
image, image_file, icon_url = self.fetch_image_from_page_data()
if not image:
image, image_file, icon_url = self.fetch_image_from_path(force=self.force)
if image:
image = self.normalize_image(image)
try:
color = self.determine_dominant_color_in_image(image)
except IndexError:
return
try:
image_str = self.string_from_image(image)
except TypeError:
return
if len(image_str) > 500000:
image = None
if (image and
(self.force or
self.feed_icon.data != image_str or
self.feed_icon.icon_url != icon_url or
self.feed_icon.not_found or
(settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))):
logging.debug(" ---> [%-30s] ~SN~FBIcon difference:~FY color:%s (%s/%s) data:%s url:%s notfound:%s no-s3:%s" % (
self.feed,
self.feed_icon.color != color, self.feed_icon.color, color,
self.feed_icon.data != image_str,
self.feed_icon.icon_url != icon_url,
self.feed_icon.not_found,
settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))
self.feed_icon.data = image_str
self.feed_icon.icon_url = icon_url
self.feed_icon.color = color
self.feed_icon.not_found = False
self.feed_icon.save()
if settings.BACKED_BY_AWS.get('icons_on_s3'):
self.save_to_s3(image_str)
if self.feed.favicon_color != color:
self.feed.favicon_color = color
self.feed.favicon_not_found = False
self.feed.save(update_fields=['favicon_color', 'favicon_not_found'])
if not image:
self.feed_icon.not_found = True
self.feed_icon.save()
self.feed.favicon_not_found = True
self.feed.save()
return not self.feed.favicon_not_found
def save_to_s3(self, image_str):
expires = datetime.datetime.now() + datetime.timedelta(days=60)
expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
k = Key(settings.S3_ICONS_BUCKET)
k.key = self.feed.s3_icons_key
k.set_metadata('Content-Type', 'image/png')
k.set_metadata('Expires', expires)
k.set_contents_from_string(image_str.decode('base64'))
k.set_acl('public-read')
self.feed.s3_icon = True
self.feed.save()
def load_icon(self, image_file, index=None):
'''
DEPRECATED
Load Windows ICO image.
See http://en.wikipedia.org/w/index.php?oldid=264332061 for file format
description.
Cribbed and modified from http://djangosnippets.org/snippets/1287/
'''
try:
image_file.seek(0)
header = struct.unpack('<3H', image_file.read(6))
except Exception, e:
return
# Check magic
if header[:2] != (0, 1):
return
# Collect icon directories
directories = []
for i in xrange(header[2]):
directory = list(struct.unpack('<4B2H2I', image_file.read(16)))
for j in xrange(3):
if not directory[j]:
directory[j] = 256
directories.append(directory)
if index is None:
# Select best icon
directory = max(directories, key=operator.itemgetter(slice(0, 3)))
else:
directory = directories[index]
# Seek to the bitmap data
image_file.seek(directory[7])
prefix = image_file.read(16)
image_file.seek(-16, 1)
if PngImagePlugin._accept(prefix):
# Windows Vista icon with PNG inside
try:
image = PngImagePlugin.PngImageFile(image_file)
except IOError:
return
else:
# Load XOR bitmap
try:
image = BmpImagePlugin.DibImageFile(image_file)
except IOError:
return
if image.mode == 'RGBA':
# Windows XP 32-bit color depth icon without AND bitmap
pass
else:
# Patch up the bitmap height
image.size = image.size[0], image.size[1] >> 1
d, e, o, a = image.tile[0]
image.tile[0] = d, (0, 0) + image.size, o, a
# Calculate AND bitmap dimensions. See
# http://en.wikipedia.org/w/index.php?oldid=264236948#Pixel_storage
# for description
offset = o + a[1] * image.size[1]
stride = ((image.size[0] + 31) >> 5) << 2
size = stride * image.size[1]
# Load AND bitmap
image_file.seek(offset)
string = image_file.read(size)
mask = Image.frombytes('1', image.size, string, 'raw',
('1;I', stride, -1))
image = image.convert('RGBA')
image.putalpha(mask)
return image
def fetch_image_from_page_data(self):
image = None
image_file = None
if self.page_data:
content = self.page_data
elif settings.BACKED_BY_AWS.get('pages_on_s3') and self.feed.s3_page:
key = settings.S3_PAGES_BUCKET.get_key(self.feed.s3_pages_key)
compressed_content = key.get_contents_as_string()
stream = StringIO(compressed_content)
gz = gzip.GzipFile(fileobj=stream)
try:
content = gz.read()
except IOError:
content = None
else:
content = MFeedPage.get_data(feed_id=self.feed.pk)
url = self._url_from_html(content)
if not url:
try:
content = requests.get(self.cleaned_feed_link).content
url = self._url_from_html(content)
except (AttributeError, SocketError, requests.ConnectionError,
requests.models.MissingSchema, requests.sessions.InvalidSchema,
requests.sessions.TooManyRedirects,
requests.models.InvalidURL,
requests.models.ChunkedEncodingError,
requests.models.ContentDecodingError,
httplib.IncompleteRead,
LocationParseError, OpenSSLError, PyAsn1Error), e:
logging.debug(" ---> ~SN~FRFailed~FY to fetch ~FGfeed icon~FY: %s" % e)
if url:
image, image_file = self.get_image_from_url(url)
return image, image_file, url
@property
def cleaned_feed_link(self):
if self.feed.feed_link.startswith('http'):
return self.feed.feed_link
return 'http://' + self.feed.feed_link
def fetch_image_from_path(self, path='favicon.ico', force=False):
image = None
url = None
if not force:
url = self.feed_icon.icon_url
if not url and self.feed.feed_link and len(self.feed.feed_link) > 6:
url = urlparse.urljoin(self.feed.feed_link, 'favicon.ico')
if not url:
return None, None, None
image, image_file = self.get_image_from_url(url)
if not image:
url = urlparse.urljoin(self.feed.feed_link, '/favicon.ico')
image, image_file = self.get_image_from_url(url)
# print 'Found: %s - %s' % (url, image)
return image, image_file, url
def get_image_from_url(self, url):
# print 'Requesting: %s' % url
if not url:
return None, None
@timelimit(30)
def _1(url):
headers = {
'User-Agent': 'NewsBlur Favicon Fetcher - %s subscriber%s - %s '
'(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
'Safari/534.48.3)' %
(
self.feed.num_subscribers,
's' if self.feed.num_subscribers != 1 else '',
self.feed.permalink
),
'Connection': 'close',
}
try:
request = urllib2.Request(url, headers=headers)
icon = urllib2.urlopen(request).read()
except Exception:
return None
return icon
try:
icon = _1(url)
except TimeoutError:
return None, None
try:
icon_file = StringIO(icon)
image = Image.open(icon_file)
except (IOError, ValueError):
return None, None
return image, icon_file
def _url_from_html(self, content):
url = None
if not content:
return url
try:
if isinstance(content, unicode):
content = content.encode('utf-8')
icon_path = lxml.html.fromstring(content).xpath(
'//link[@rel="icon" or @rel="shortcut icon"]/@href'
)
except (lxml.etree.ParserError, TypeError):
return url
if icon_path:
if str(icon_path[0]).startswith('http'):
url = icon_path[0]
else:
url = urlparse.urljoin(self.feed.feed_link, icon_path[0])
return url
def normalize_image(self, image):
# if image.size != (16, 16):
# image = image.resize((16, 16), Image.BICUBIC)
if image.mode != 'RGBA':
try:
image = image.convert('RGBA')
except IOError:
pass
return image
def determine_dominant_color_in_image(self, image):
NUM_CLUSTERS = 5
# Convert image into array of values for each point.
if image.mode == '1':
# convert() returns a new image, so assign the result back for the conversion to take effect
image = image.convert('L')
ar = numpy.array(image)
# ar = scipy.misc.fromimage(image)
shape = ar.shape
# Reshape array of values to merge color bands. [[R], [G], [B], [A]] => [R, G, B, A]
if len(shape) > 2:
ar = ar.reshape(scipy.product(shape[:2]), shape[2])
# Get NUM_CLUSTERS worth of centroids.
ar = ar.astype(numpy.float)
codes, _ = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
# Pare centroids, removing blacks and whites and shades of really dark and really light.
original_codes = codes
for low, hi in [(60, 200), (35, 230), (10, 250)]:
codes = scipy.array([code for code in codes
if not ((code[0] < low and code[1] < low and code[2] < low) or
(code[0] > hi and code[1] > hi and code[2] > hi))])
if not len(codes):
codes = original_codes
else:
break
# Assign codes (vector quantization). Each vector is compared to the centroids
# and assigned the nearest one.
vecs, _ = scipy.cluster.vq.vq(ar, codes)
# Count occurences of each clustered vector.
counts, bins = scipy.histogram(vecs, len(codes))
# Show colors for each code in its hex value.
# colors = [''.join(chr(c) for c in code).encode('hex') for code in codes]
# total = scipy.sum(counts)
# print dict(zip(colors, [count/float(total) for count in counts]))
# Find the most frequent color, based on the counts.
index_max = scipy.argmax(counts)
peak = codes.astype(int)[index_max]
color = ''.join(chr(c) for c in peak).encode('hex')
return color[:6]
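# Illustrative note (not part of the original source): the value returned above is
# the first six hex digits of the most frequent k-means centroid, e.g. a mostly
# orange favicon might yield something like 'e67e22' (hypothetical value); this is
# the string stored in feed.favicon_color and MFeedIcon.color by save().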
def string_from_image(self, image):
output = StringIO()
image.save(output, 'png', quality=95)
contents = output.getvalue()
output.close()
return contents.encode('base64')
| mit | -3,460,954,564,688,705,500 | 35.395161 | 128 | 0.532609 | false |
johnnoone/aiovault | tests/test_ldap.py | 1 | 1590 | from aiovault import Vault
from conftest import async_test
import pytest
@async_test
def test_ldap(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
backend = yield from client.auth.enable('ldap')
configured = yield from backend.configure(url='ldap://ldap.forumsys.com',
userattr='uid',
userdn='dc=example,dc=com',
groupdn='dc=example,dc=com')
assert configured
writen = yield from backend.write_group(name='scientists', policies='foo')
assert writen
token = yield from backend.login(username='tesla', password='password')
assert token['metadata']['username'] == 'tesla'
@async_test
def test_ldap_crud(dev_server):
client = Vault(dev_server.addr, token=dev_server.root_token)
backend = yield from client.auth.enable('ldap')
configured = yield from backend.configure(url='ldap://ldap.forumsys.com',
userattr='uid',
userdn='dc=example,dc=com',
groupdn='dc=example,dc=com')
assert configured
writen = yield from backend.write_group(name='g1', policies='foo')
assert writen
data = yield from backend.read_group(name='g1')
assert data['policies'] == {'foo'}
deleted = yield from backend.delete_group(name='g1')
assert deleted
with pytest.raises(KeyError):
yield from backend.read_group(name='g1')
| bsd-3-clause | 9,064,265,453,553,204,000 | 32.829787 | 78 | 0.579245 | false |
DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/write_concern.py | 1 | 4561 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with write concerns."""
from datadog_checks.tokumx.vendor.bson.py3compat import integer_types, string_type
from datadog_checks.tokumx.vendor.pymongo.errors import ConfigurationError
class WriteConcern(object):
"""WriteConcern
:Parameters:
- `w`: (integer or string) Used with replication, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<integer>` always includes the replica
set primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). **w=0 disables acknowledgement
of write operations and can not be used with other write concern
options.**
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`.
"""
__slots__ = ("__document", "__acknowledged")
def __init__(self, w=None, wtimeout=None, j=None, fsync=None):
self.__document = {}
self.__acknowledged = True
if wtimeout is not None:
if not isinstance(wtimeout, integer_types):
raise TypeError("wtimeout must be an integer")
self.__document["wtimeout"] = wtimeout
if j is not None:
if not isinstance(j, bool):
raise TypeError("j must be True or False")
self.__document["j"] = j
if fsync is not None:
if not isinstance(fsync, bool):
raise TypeError("fsync must be True or False")
if j and fsync:
raise ConfigurationError("Can't set both j "
"and fsync at the same time")
self.__document["fsync"] = fsync
if self.__document and w == 0:
raise ConfigurationError("Can not use w value "
"of 0 with other options")
if w is not None:
if isinstance(w, integer_types):
self.__acknowledged = w > 0
elif not isinstance(w, string_type):
raise TypeError("w must be an integer or string")
self.__document["w"] = w
@property
def document(self):
"""The document representation of this write concern.
.. note::
:class:`WriteConcern` is immutable. Mutating the value of
:attr:`document` does not mutate this :class:`WriteConcern`.
"""
return self.__document.copy()
@property
def acknowledged(self):
"""If ``True`` write operations will wait for acknowledgement before
returning.
"""
return self.__acknowledged
def __repr__(self):
return ("WriteConcern(%s)" % (
", ".join("%s=%s" % kvt for kvt in self.document.items()),))
def __eq__(self, other):
return self.document == other.document
def __ne__(self, other):
return self.document != other.document
def __bool__(self):
return bool(self.document)
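# Illustrative usage (added example, not part of the original module):
#
#   WriteConcern(w="majority", wtimeout=5000).document
#       -> {'w': 'majority', 'wtimeout': 5000}
#   WriteConcern(w=0).acknowledged
#       -> False (unacknowledged writes)
#   WriteConcern(w=0, j=True)
#       -> raises ConfigurationError (w=0 cannot be combined with other options)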
| bsd-3-clause | 2,093,304,799,596,190,200 | 40.463636 | 82 | 0.623328 | false |
HHSIDEAlab/hhs_ansible | lib/mark_deployment.py | 1 | 1988 | import argparse
import json
import sys
import urllib.request
from argparse import RawTextHelpFormatter
DESCRIPTION = """
Mark a new deployment in New Relic
Example:
python ./lib/mark_deployment.py \\
--api_key API_KEY_GOES_HERE \\
--app_id APP_ID_GOES_HERE \\
--version VERSION_STRING_GOES_HERE
"""
def main(user, app_id, version, api_key):
data = {
"deployment": {
"revision": version,
"changelog": "",
"description": "",
"user": user
}
}
url = 'https://api.newrelic.com/v2/applications/%s/deployments.json' % app_id
headers = {
'Content-Type': 'application/json',
'X-Api-Key': api_key
}
req = urllib.request.Request(
url, data=json.dumps(data).encode('utf-8'), headers=headers, method='POST')
resp = urllib.request.urlopen(req)
if resp.status != 201:
print("Could not post deployment info to New Relic")
else:
print("Successfully marked deployment in New Relic")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=RawTextHelpFormatter
)
parser.add_argument(
'--user', dest='user', type=str, default='jenkins',
help='Identifies the user marking the deployment in New Relic'
)
parser.add_argument(
'--app_id', dest='app_id', type=str,
help='The New Relic application ID'
)
parser.add_argument(
'--version', dest='version', type=str,
help='The version or release number of the deployment'
)
parser.add_argument(
'--api_key', dest='api_key', type=str,
help='The New Relic API Key used to authenticate'
)
args = parser.parse_args()
if not args.api_key or not args.app_id or not args.version:
print("Missing required arguments.\n")
parser.print_help()
sys.exit(1)
main(args.user, args.app_id, args.version, args.api_key)
| gpl-2.0 | 9,018,319,330,410,440,000 | 24.818182 | 83 | 0.607646 | false |
wking/pygrader | pygrader/model/assignment.py | 1 | 1233 | # Copyright (C) 2012 W. Trevor King <[email protected]>
#
# This file is part of pygrader.
#
# pygrader is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pygrader is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pygrader. If not, see <http://www.gnu.org/licenses/>.
class Assignment (object):
def __init__(self, name, points=1, weight=0, due=0, submittable=True):
self.name = name
self.points = points
self.weight = weight
self.due = due
self.submittable = submittable
def __str__(self):
return '<{} {}>'.format(type(self).__name__, self.name)
def __lt__(self, other):
if self.due < other.due:
return True
elif other.due < self.due:
return False
return self.name < other.name
| gpl-3.0 | -1,820,910,119,033,053,400 | 36.363636 | 79 | 0.6691 | false |
avastjohn/maventy_new | growthcalc/growthcalc.py | 1 | 22700 | '''
Created on Sep 18, 2011
@author: mike
'''
import logging
import healthdb.util
import math
from xml.dom import minidom
import time
from datetime import datetime
from google.appengine.ext import db
from google.appengine.api import datastore_errors
import urllib2
import urllib
from xml.parsers.expat import ExpatError
import csv
# Boundaries for input values from WHO's AnthroComputation.cs.
# The min weight for a child, in kg.
input_minweight = 0.9
# The max weight for a child, in kg.
input_maxweight = 58
# The min length/height for a child, in cm.
input_minlengthorheight = 38
# The max length/height for a child, in cm.
input_maxlengthorheight = 150
# The min HC for a child, in cm.
input_minhc = 25
# The max HC for a child, in cm.
input_maxhc = 64
# Correction used for converting from recumbent to standing
heightcorrection = 0.7
# cutoff number of days for converting from recumbent to standing
height_mindays = 731
# The min age for a child.
mindays = 0
# The max age for a child to be considered in calculations.
maxdays = 1856
# The min length, in cm (WHO standard).
minlength = 45
# The max length, in cm (WHO standard).
maxlength = 110
# The min height, in cm (WHO standard).
minheight = 65
# The max height, in cm (WHO standard).
maxheight = 120
class Sex:
MALE="MALE"
FEMALE="FEMALE"
map = {}
map[MALE] = 1
map[FEMALE] = 2
class Measured:
STANDING="STANDING"
RECUMBENT="RECUMBENT"
def calculate_scores(pmap, visit=None):
""" This function calculates the anthropometric values based on the input
provided by the user at the command prompt. The z-scores and
percentiles are calculated for
Weight-for-age
Length/height-for-age
Weight-for-length
Weight-for-height
BMI-for-age
Head circumference-for-age
We do not at present plan to do: Arm circumference-for-age,
Triceps skinfold-for-age and Subscapular skinfold-for-age.
This program requires access to the WHO datasets. The nine datasets
corresponding to the nine measurements should be made available somewhere
in the classpath. These files should be of
.csv extension, with comma-separated values. The following are the
file names corresponding to its measurement.
Weight-for-age : weianthro.csv
Length/height-for-age : lenanthro.csv
Weight-for-length : wflanthro.csv
Weight-for-height : wfhanthro.csv
BMI-for-age : bmianthro.csv
Head circumference-for-age : hcanthro.csv
Not currently used:
Arm circumference-for-age : acanthro.csv
Triceps skinfold-for-age : tsanthro.csv
Subscapular skinfold-for-age: ssanthro.csv """
attrmap = {}
attrmap['generated_date'] = datetime.now()
if pmap['date_of_visit'] != None and pmap['date_of_birth'] != None:
attrmap['age_in_days'] = (pmap['date_of_visit'] - pmap['date_of_birth']).days
else:
attrmap['age_in_days'] = -1
loh = NormalizedLengthOrHeight(attrmap['age_in_days'],
pmap['length'], pmap['measured'])
attrmap['weight'] = pmap['weight']
attrmap['height'] = loh.lengthOrHeight
if u'head_circumference' in pmap:
attrmap['head_circumference'] = pmap['head_circumference']
anthro = Anthro()
anthroConfigMap = {}
if not pmap['hasOedema']:
attrmap['body_mass_index'] = heightAndWeightToBmi(loh.lengthOrHeight, pmap['weight'])
anthroConfigMap['body_mass_index_for_age'] = anthro.getBodyMassIndexZscoreConfigForAge(Sex.map[pmap['sex']], attrmap['age_in_days'], attrmap['body_mass_index'], attrmap['weight'], attrmap['height'])
anthroConfigMap['weight_for_length_or_height'] = anthro.getWeightZscoreConfigForLengthOrHeight(Sex.map[pmap['sex']], loh, attrmap['weight'], attrmap['age_in_days'])
anthroConfigMap['weight_for_age'] = anthro.getWeightZscoreConfigForAge(Sex.map[pmap['sex']], attrmap['age_in_days'], attrmap['weight'])
else:
attrmap['body_mass_index'] = healthdb.util.NaN
anthroConfigMap['body_mass_index_for_age'] = healthdb.util.NaN
anthroConfigMap['weight_for_length_or_height'] = healthdb.util.NaN
anthroConfigMap['weight_for_age'] = healthdb.util.NaN
if 'head_circumference' in attrmap:
anthroConfigMap['head_circumference_for_age'] = healthdb.util.NaN
anthroConfigMap['length_or_height_for_age'] = anthro.getLengthOrHeightZscoreConfigForAge(Sex.map[pmap['sex']], attrmap['age_in_days'], attrmap['height'], pmap['measured'])
if 'head_circumference' in attrmap:
anthroConfigMap['head_circumference_for_age'] = anthro.getHeadCircumferenceZscoreConfigForAge(Sex.map[pmap['sex']], attrmap['age_in_days'], attrmap['head_circumference'])
for att in VisitStatistics.INDICATORS:
# map key is str(att) because **attrmap requires string keys
if att in anthroConfigMap:
zscore = anthroConfigMap[att]
percentile = zscoreToPercentile(zscore)
attrmap[str(att)] = ZscoreAndPercentile(zscore, percentile)
return healthdb.models.VisitStatistics(parent=visit, **attrmap)
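# Illustrative input for calculate_scores() (hypothetical values, added for clarity):
#
#   pmap = {'date_of_birth': datetime(2010, 1, 1),
#           'date_of_visit': datetime(2011, 1, 1),
#           'sex': Sex.MALE,
#           'weight': 9.5,               # kg
#           'length': 74.0,              # cm
#           'measured': Measured.RECUMBENT,
#           'hasOedema': False}          # 'head_circumference' (cm) is optional
#
# The returned VisitStatistics carries a ZscoreAndPercentile for each name in
# VisitStatistics.INDICATORS that could be computed from the WHO tables listed above.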
class Anthro():
"""Anthro contains all the parameters for the Box-Cox score computations. """
def getBodyMassIndexZscoreConfigForAge(self, sex, ageInDays, bodyMassIndex, weight, height):
ret = healthdb.util.NaN
hasOedema = False
if hasOedema or ageInDays < mindays or ageInDays > maxdays or not (weight > 0 and height > 0):
ret = healthdb.util.NaN
else:
config = AnthroConfig('growthcalc/bmianthro.csv', bodyMassIndex, sex, ageInDays)
ret = zscoreFromAttribute(config)
return ret
def getWeightZscoreConfigForLengthOrHeight(self, sex, loh, weight, ageInDays):
ret = healthdb.util.NaN
hasOedema = False
if hasOedema or not(input_minweight <= weight and weight <= input_maxweight):
ret = healthdb.util.NaN
else:
if loh.measured == Measured.STANDING:
config = AnthroConfig('growthcalc/wfhanthro.csv', weight, sex, loh.lengthOrHeight)
elif loh.measured == Measured.RECUMBENT:
config = AnthroConfig('growthcalc/wflanthro.csv', weight, sex, loh.lengthOrHeight)
ret = zscoreFromAttribute(config)
return ret
def getWeightZscoreConfigForAge(self, sex, ageInDays, weight):
ret = healthdb.util.NaN
hasOedema = False
if hasOedema or ageInDays < 0 or ageInDays > maxdays or not (input_minweight <= weight and weight <= input_maxweight):
ret = healthdb.util.NaN
else:
config = AnthroConfig('growthcalc/weianthro.csv', weight, sex, ageInDays)
ret = zscoreFromAttribute(config)
return ret
def getLengthOrHeightZscoreConfigForAge(self, sex, ageInDays, height, measured):
ret = healthdb.util.NaN
if ageInDays < 0 or ageInDays > maxdays or not (height >= 1):
ret = healthdb.util.NaN
else:
config = AnthroConfig('growthcalc/lenanthro.csv', height, sex, ageInDays)
ret = zscoreFromAttribute(config)
return ret
def getHeadCircumferenceZscoreConfigForAge(self, sex, ageInDays, headCircumference):
ret = healthdb.util.NaN
if ageInDays < 0 or ageInDays > maxdays or not (input_minhc <= headCircumference and headCircumference <= input_maxhc):
ret = healthdb.util.NaN
else:
config = AnthroConfig('growthcalc/hcanthro.csv', headCircumference, sex, ageInDays)
ret = zscoreFromAttribute(config)
return ret
def zscoreFromAttribute(anthroConfig):
""" Return a restrictred zscore from a map of data filename and physical
attributes.
The filename must fit the design of a WHO data file defined as:
sex,[age|height|length],l,m,s,[loh]
sex: 1 indicating MALE, 2 indicating FEMALE
age: age in days since birth
height: height in cm
length: length in cm
l: power,
m: median, and
s: variation coefficient as used in calculating zscore
loh: 'L' for length, 'H' for height """
for row in csv.DictReader(open(anthroConfig.fileName)):
if 'age' in row:
dataAgeHeightOrLength = row['age']
elif 'length' in row:
dataAgeHeightOrLength = row['length']
elif 'height' in row:
dataAgeHeightOrLength = row['height']
if int(row['sex']) == anthroConfig.sex and float(dataAgeHeightOrLength) == anthroConfig.ageHeightOrLength:
return zscoreOtherRestricted(anthroConfig.measureKey, float(row['l']), float(row['m']), float(row['s']), True)
return healthdb.util.NaN
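# Illustrative data row (made-up numbers, added for clarity): a line such as
#
#   sex,age,l,m,s
#   1,365,0.3,9.6,0.08
#
# in weianthro.csv would match a boy (sex=1) measured at 365 days, and his weight
# would then be scored with zscoreOtherRestricted(weight, 0.3, 9.6, 0.08, True).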
def zscoreOtherRestricted(measure, power, median, variationCoefficient, computeFinalZScore):
"""Return a restricted zscore.
Modified as follows:
- If within -3 .. 3 inclusive, zscore
- If outside, NaN if computeFinalZScore is false, otherwise
extrapolated in a particular way given by the WHO standard """
zscoreNorm = zscore(measure, power, median, variationCoefficient)
if math.fabs(zscoreNorm) > 3 and computeFinalZScore:
if zscoreNorm > 3:
std3Pos = cutoff(3, power, median, variationCoefficient)
std23Pos = std3Pos - cutoff(2, power, median, variationCoefficient)
zscoreNorm = 3 + ((measure - std3Pos) / std23Pos)
elif zscoreNorm < -3:
std3Neg = cutoff(-3, power, median, variationCoefficient)
std23Neg = cutoff(-2, power, median, variationCoefficient) - std3Neg
zscoreNorm = -3 + ((measure - std3Neg) / std23Neg)
return zscoreNorm
def zscore(measure, power, median, variationCoefficient):
return (math.pow((measure / median), power) - 1) / (power * variationCoefficient)
def cutoff(desiredZscore, power, median, variationCoefficient):
return median * (math.pow((1 + (power * variationCoefficient * desiredZscore)), (1 / power)))
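# Added note (not in the original source): zscore() and cutoff() are inverses of the
# LMS transformation. With deliberately simple, hypothetical parameters power=1,
# median=10, variationCoefficient=0.1:
#
#   zscore(12.0, 1, 10, 0.1)  -> (12/10 - 1) / (1 * 0.1) = 2.0
#   cutoff(2.0, 1, 10, 0.1)   -> 10 * (1 + 1 * 0.1 * 2.0) = 12.0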
def heightAndWeightToBmi(height, weight):
"""standard metric conversion from weight and height to BMI, height in cm, weight in kg"""
if weight < input_minweight or weight > input_maxweight or height < input_minlengthorheight or height > input_maxlengthorheight:
output = healthdb.util.NaN
else:
output = weight / ((height / 100.0) ** 2.0)
return output
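# Added example (hypothetical values): heightAndWeightToBmi(100.0, 16.0) gives
# 16.0 / (1.0 ** 2) = 16.0 kg/m^2, while inputs outside the input_min*/input_max*
# bounds defined at the top of this module (e.g. weight 0.5 kg) return NaN.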
def zscoreToPercentile(zscore):
"""Produce a number between 0 and 100 inclusive that is the percentile for
the given zscore, or Double.NaN if the zscore is outside of -3 to 3."""
retVal = healthdb.util.NaN
# WHO technical specs chapter 7: "However, a restriction was imposed on
# all indicators to enable the derivation of percentiles only within
# the interval corresponding to z-scores between -3 and 3. The
# underlying reasoning is that percentiles beyond +-3 SD are invariant
# to changes in equivalent z-scores. The loss accruing to this
# restriction is small since the inclusion range corresponds to the
# 0.135th to 99.865th percentiles."
if math.fabs(zscore) <= 3:
absVal = math.fabs(zscore)
P1 = (1 - 1 / math.sqrt(2 * math.pi) * math.exp(-math.pow(absVal, 2) / 2)
* (
0.31938 * (1 / (1 + 0.2316419 * absVal))
- 0.356563782 * math.pow((1 / (1 + 0.2316419 * absVal)), 2)
+ 1.781477937 * math.pow((1 / (1 + 0.2316419 * absVal)), 3)
- 1.82125 * math.pow((1 / (1 + 0.2316419 * absVal)), 4)
+ 1.330274429 * math.pow((1 / (1 + 0.2316419 * absVal)), 5)
))
if zscore > 0:
P1 = P1 * 100
else:
P1 = 100 - P1 * 100
if 0 <= P1 and P1 <= 100:
retVal = P1
return retVal
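# Added example (approximate, not in the original source): the series above is the
# Abramowitz & Stegun normal-CDF approximation, so zscoreToPercentile(0.0) is
# roughly 50.0 and zscoreToPercentile(-3.0) roughly 0.135, while anything with
# |zscore| > 3 (e.g. 4.0) returns NaN because percentiles are only derived within +/-3 SD.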
class NormalizedLengthOrHeight():
"""Adjust length-or-height by whether person is standing or recumbent (lying
down)."""
def __init__(self, ageInDays, lengthOrHeight, measured):
self.lengthOrHeight = lengthOrHeight
self.measured = measured
if lengthOrHeight < input_minlengthorheight or lengthOrHeight > input_maxlengthorheight:
self.lengthOrHeight = healthdb.util.NaN
if ageInDays >= height_mindays and measured == Measured.RECUMBENT:
self.lengthOrHeight -= heightcorrection
self.measured = Measured.STANDING
elif 0 <= ageInDays and ageInDays < height_mindays and measured == Measured.STANDING:
self.lengthOrHeight += heightcorrection
self.measured = Measured.RECUMBENT
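# Added example (hypothetical values): a child of 800 days measured RECUMBENT at
# 85.0 cm is normalized to STANDING 84.3 cm (heightcorrection subtracted, since
# 800 >= height_mindays), while a 400-day-old measured STANDING gets 0.7 cm added
# and is treated as RECUMBENT.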
class ZscoreAndPercentile():
"""A class to contain zscore and percentile, each a float or NaN."""
def __init__(self, zscore, percentile):
self.zscore = zscore
self.percentile = percentile
def __str__(self):
"""String for debugging"""
return "zscore %s percentile %s" % (self.zscore, self.percentile)
def is_alertworthy(self):
"""Alertable if zscore not NaN and <0, and percentile < 25 or NaN.
This means that this statistic shows the patient is in bad shape.
"""
return (not healthdb.util.isNaN(self.zscore)) and self.zscore < 0 and (
healthdb.util.isNaN(self.percentile) or (self.percentile < 25))
def zscore_is_nan(self):
"""Return True if self.zscore is Nan, otherwise false.
Convenience method for Django templates, which have no good logic.
"""
return healthdb.util.isNaN(self.zscore)
def percentile_is_nan(self):
"""Return True if self.percentile is Nan, otherwise false.
Convenience method for Django templates, which have no good logic.
"""
return healthdb.util.isNaN(self.percentile)
class AnthroConfig:
def __init__(self, fileName, measureKey, sex, ageHeightOrLength):
self.fileName = fileName
self.measureKey = measureKey
self.sex = sex
self.ageHeightOrLength = ageHeightOrLength
class ZscoreAndPercentileProperty(db.Property):
"""A ZscoreAndPercentile property class."""
data_type = ZscoreAndPercentile
def get_value_for_datastore(self, model_instance):
zandp = super(ZscoreAndPercentileProperty, self
).get_value_for_datastore(model_instance)
if zandp:
zandp = str(zandp.zscore) + ':' + str(zandp.percentile)
return zandp
def make_value_from_datastore(self, value):
ret = None
if value:
zscore, percentile = value.split(':')
try:
zscore = float(zscore)
except ValueError, dummy:
assert healthdb.util.isNaNString(zscore), 'value is %s, zscore is %s' % (
value, zscore)
zscore = healthdb.util.NaN
try:
percentile = float(percentile)
except ValueError, dummy:
#logging.warning('percentile was invalid: %s' % percentile)
# On some platforms, float('NaN') doesn't work
assert healthdb.util.isNaNString(percentile), 'value is %s, percentile is %s' % (
value, percentile)
percentile = healthdb.util.NaN
ret = ZscoreAndPercentile(zscore, percentile)
return ret
def validate(self, value):
value = super(ZscoreAndPercentileProperty, self).validate(value)
if value is None or isinstance(value, ZscoreAndPercentile):
return value
elif isinstance(value, basestring):
return self.make_value_from_datastore(value)
raise db.BadValueError(
"Property %s must be a ZscoreAndPercentile or string." % self.name)
class VisitStatistics(db.Model):
# Constants for datastore
GROWTHSERVER_MALE = Sex.MALE
GROWTHSERVER_FEMALE = Sex.FEMALE
GROWTHSERVER_STANDING = Measured.STANDING
GROWTHSERVER_RECUMBENT = Measured.RECUMBENT
# Different models computed from WHO model
INDICATORS = [u'weight_for_length_or_height', u'weight_for_age',
u'length_or_height_for_age', u'body_mass_index_for_age',
u'head_circumference_for_age']
# parent is Visit
generated_date = db.DateTimeProperty(required=True)
weight_for_length_or_height = ZscoreAndPercentileProperty(required=True)
weight_for_age = ZscoreAndPercentileProperty(required=True)
length_or_height_for_age = ZscoreAndPercentileProperty(required=True)
body_mass_index_for_age = ZscoreAndPercentileProperty(required=True)
head_circumference_for_age = ZscoreAndPercentileProperty(required=False)
age_in_days = db.IntegerProperty(required=True)
body_mass_index = db.FloatProperty(required=True)
@property
def id(self):
return self.key().id()
def __str__(self):
return ("<VisitStatistics id=%s, generated_date=%s, "
"age_in_days=%s, body_mass_index=%s, "
"weight_for_length_or_height=%s, "
"weight_for_age=%s, "
"length_or_height_for_age=%s, "
"body_mass_index_for_age=%s, "
"head_circumference_for_age=%s" % (
self.id,
self.generated_date,
self.age_in_days,
self.body_mass_index,
self.weight_for_length_or_height,
self.weight_for_age,
self.length_or_height_for_age,
self.body_mass_index_for_age,
self.head_circumference_for_age))
@staticmethod
def _parse_zscore_and_percentile(att, doc):
zandp = None
results_elem = doc.getElementsByTagName('results')
if results_elem:
attp = att + u'_percentile'
attz = att + u'_zscore'
zscore = results_elem[0].getAttribute(attz)
# Note: float('NaN') only works sometimes, so go by the string instead
if zscore and not healthdb.util.isNaNString(zscore):
zscore = float(zscore)
else:
zscore = healthdb.util.NaN
percentile = results_elem[0].getAttribute(attp)
if percentile and not healthdb.util.isNaNString(percentile):
percentile = float(percentile)
else:
percentile = healthdb.util.NaN
if zscore and percentile:
zandp = ZscoreAndPercentile(zscore, percentile)
return zandp
@staticmethod
def _parse_visit_statistics(result, visit=None):
'''Parse an XML string from growthserver, return a VisitStatistics object with visit as its parent
'''
#logging.info("start parse visit obj %s" % visit)
visit_stats = None
try:
doc = minidom.parseString(result)
assert doc.documentElement.tagName == 'growthserver_response'
attrmap = {}
response_elem = doc.getElementsByTagName('growthserver_response')
stime = time.strptime(response_elem[0].getAttribute('date_generated'),
"%Y-%m-%d %H:%M:%S +0000")
attrmap['generated_date'] = datetime(*stime[:6])
results_elem = doc.getElementsByTagName('results')
att = u'age_in_days'
attrmap[str(att)] = int(results_elem[0].getAttribute(att))
att = u'body_mass_index'
bmi = results_elem[0].getAttribute(att)
try:
bmi = float(bmi)
except ValueError, err:
assert healthdb.util.isNaNString(bmi), 'bmi is %s' % bmi
# TODO(dan): Unit test that NaN bmi is okay
bmi = healthdb.util.NaN
attrmap[str(att)] = bmi
if not healthdb.util.isNaN(bmi):
try:
for att in VisitStatistics.INDICATORS:
# map key is str(att) because **attrmap requires string keys
attrmap[str(att)] = VisitStatistics._parse_zscore_and_percentile(
att, doc)
#print "attrmap: %s" % attrmap
visit_stats = VisitStatistics(parent=visit, **attrmap)
except ValueError, err:
logging.error("Couldn't parse visit statistics xml: %s from '%s'"
% (err, result))
except datastore_errors.BadValueError, err:
logging.error("Visit statistics missing values: %s: from '%s'"
% (err, result))
except ExpatError, err:
logging.error("error '%s' parsing '%s'" % (err, result))
return visit_stats
def is_alertworthy(self):
ret = False
for indicator in VisitStatistics.INDICATORS:
if hasattr(self, indicator):
zandp = getattr(self, indicator)
if zandp and zandp.is_alertworthy():
ret = True
return ret
def get_zandp(self, indicator):
return getattr(self, indicator)
def get_worst_zscore(self):
"""Get the worst zscore of any indicator, NOT INCLUDING NaNs!
We ignore NaNs because they are troublesome to sort or filter by.
"""
worst_zscore = None
for indicator in VisitStatistics.INDICATORS:
if hasattr(self, indicator):
zandp = getattr(self, indicator)
if zandp and not healthdb.util.isNaN(zandp.zscore):
if worst_zscore is None or worst_zscore > zandp.zscore:
# logging.info("new worst_zscore = %s" % zandp.zscore)
worst_zscore = zandp.zscore
# logging.info("worst_zscore = %s" % worst_zscore)
return worst_zscore
# TODO(dan): Unit test this method
@staticmethod
def get_stats_for_visit(visit):
patient = visit.get_patient()
# TODO(dan): Add oedema as an attribute in future
hasOedema = False
return VisitStatistics.get_stats(patient.birth_date,
visit.visit_date,
patient.sex,
visit.weight,
visit.head_circumference,
visit.height,
visit.height_position,
hasOedema,
visit)
@staticmethod
def get_stats(birth_date, visit_date, sex, weight, head_circumference, height,
height_position, hasOedema, visit = None):
'''Get growth statistics from growthserver.
head_circumference is optional
sex is Patient.MALE or Patient.FEMALE
height_position is Visit.STANDING or Visit.RECUMBENT
'''
# For debugging a future version:
#rooturl = 'http://4.latest.growthserver.appspot.com/growthserver'
rooturl = 'http://growthserver.appspot.com/growthserver'
# TODO(dan): Return none if visit date is too far, or rather push that
# logic to the growthserver
pmap = {'date_of_birth': birth_date,
'date_of_visit': visit_date,
'sex' : VisitStatistics.SEX_MAP[sex],
'weight' : weight,
'length' : height,
'measured' : VisitStatistics.HEIGHT_POSITION_MAP[height_position],
'hasOedema' : hasOedema,
'format' : 'xml'}
# head_circumference is optional
if head_circumference: pmap['head_circumference'] = head_circumference
remote_growthcalc = False
if remote_growthcalc:
try:
data = urllib.urlencode(pmap)
result = urllib2.urlopen(rooturl, data)
result_string = result.read()
visit_stats = VisitStatistics._parse_visit_statistics(result_string, visit)
logging.debug("result %s" % result_string)
except urllib2.URLError, e:
logging.error("get_stats_for_visit: %s" % e)
visit_stats = None
else:
visit_stats = calculate_scores(pmap, visit)
return visit_stats
| bsd-3-clause | 5,793,959,453,705,045,000 | 36.959866 | 202 | 0.662643 | false |
dladd/pyFormex | pyformex/compat_2k.py | 1 | 1618 | # $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""pyFormex compatibility module for Python2.x
The compatibility modules for different Python versions are intended
to wrap code changes between the versions into functions located at
a single place.
Note that we can not implement this as a single module and test for
the Python version inside that module. The differences between the
versions might cause compilation to fail.
"""
from __future__ import print_function
def execFile(f,*args):
return execfile(f,*args)
# End
| gpl-3.0 | -7,526,737,087,426,599,000 | 38.463415 | 77 | 0.747837 | false |