| repo_name (string, len 5-92) | path (string, len 4-232) | copies (string, 19 classes) | size (string, len 4-7) | content (string, len 721-1.04M) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
mlperf/training_results_v0.7 | Google/benchmarks/resnet/implementations/resnet-research-TF-tpu-v4-512/resnet_main.py | 1 | 14273 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from REDACTED.mlp_log import mlp_log
from REDACTED.resnet import imagenet_input
from REDACTED.resnet import lars_util
from REDACTED.resnet import resnet_model
from REDACTED.util import train_and_eval_runner
FLAGS = flags.FLAGS
# Model specific flags
flags.DEFINE_string(
'data_dir', default=None,
help=('The directory where the ImageNet input data is stored. Please see'
' the README.md for the expected data format.'))
flags.DEFINE_integer(
'resnet_depth', default=50,
help=('Depth of ResNet model to use. Must be one of {18, 34, 50, 101, 152,'
' 200}. ResNet-18 and 34 use the pre-activation residual blocks'
' without bottleneck layers. The other models use pre-activation'
' bottleneck layers. Deeper models require more training time and'
' more memory and may require reducing --train_batch_size to prevent'
' running out of memory.'))
flags.DEFINE_integer(
'train_steps', default=112590,
help=('The number of steps to use for training. Default is 112590 steps'
' which is approximately 90 epochs at batch size 1024. This flag'
' should be adjusted according to the --train_batch_size flag.'))
flags.DEFINE_integer(
'train_batch_size', default=1024, help='Batch size for training.')
flags.DEFINE_integer(
'eval_batch_size', default=1024, help='Batch size for evaluation.')
flags.DEFINE_integer(
'num_train_images', default=1281167, help='Size of training data set.')
flags.DEFINE_integer(
'num_eval_images', default=50000, help='Size of evaluation data set.')
flags.DEFINE_integer(
'num_label_classes', default=1000, help='Number of classes, at least 2')
flags.DEFINE_integer(
'steps_per_eval', default=1251,
help=('Controls how often evaluation is performed. Since evaluation is'
' fairly expensive, it is advised to evaluate as infrequently as'
' possible (i.e. up to --train_steps, which evaluates the model only'
' after finishing the entire training regime).'))
flags.DEFINE_integer(
'iterations_per_loop', default=1251,
help=('Number of steps to run on TPU before outfeeding metrics to the CPU.'
' If the number of iterations in the loop would exceed the number of'
' train steps, the loop will exit before reaching'
' --iterations_per_loop. The larger this value is, the higher the'
' utilization on the TPU.'))
flags.DEFINE_integer('num_replicas', default=8, help=('Number of replicas.'))
flags.DEFINE_string(
'precision', default='bfloat16',
help=('Precision to use; one of: {bfloat16, float32}'))
flags.DEFINE_float(
'base_learning_rate', default=0.1,
help=('Base learning rate when train batch size is 256.'))
flags.DEFINE_float(
'momentum', default=0.9,
help=('Momentum parameter used in the MomentumOptimizer.'))
flags.DEFINE_float(
'weight_decay', default=1e-4,
    help=('Weight decay coefficient for L2 regularization.'))
flags.DEFINE_float(
'label_smoothing', default=0.0,
help=('Label smoothing parameter used in the softmax_cross_entropy'))
flags.DEFINE_bool('enable_lars',
default=False,
help=('Enable LARS optimizer for large batch training.'))
flags.DEFINE_float('poly_rate', default=0.0,
help=('Set LARS/Poly learning rate.'))
flags.DEFINE_float(
'stop_threshold', default=0.759, help=('Stop threshold for MLPerf.'))
flags.DEFINE_integer('image_size', 224, 'The input image size.')
flags.DEFINE_integer(
'distributed_group_size',
default=1,
help=('When set to > 1, it will enable distributed batch normalization'))
tf.flags.DEFINE_multi_integer(
'input_partition_dims',
default=None,
help=('Number of partitions on each dimension of the input. Each TPU core'
' processes a partition of the input image in parallel using spatial'
' partitioning.'))
flags.DEFINE_bool(
'use_space_to_depth',
default=False,
help=('Enable space-to-depth optimization for conv-0.'))
# Learning rate schedule
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
# The input tensor is in the range [0, 255]. These constants are the ImageNet
# per-channel mean and standard deviation scaled by 255, used below to
# normalize the images to zero mean and unit variance.
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def learning_rate_schedule(current_epoch):
"""Handles linear scaling rule, gradual warmup, and LR decay.
The learning rate starts at 0, then it increases linearly per step.
After 5 epochs we reach the base learning rate (scaled to account
for batch size).
After 30, 60 and 80 epochs the learning rate is divided by 10.
After 90 epochs training stops and the LR is set to 0. This ensures
that we train for exactly 90 epochs for reproducibility.
Args:
current_epoch: `Tensor` for current epoch.
Returns:
A scaled `Tensor` for current learning rate.
"""
mlp_log.mlperf_print('lars_opt_base_learning_rate', FLAGS.base_learning_rate)
scaled_lr = FLAGS.base_learning_rate * (FLAGS.train_batch_size / 256.0)
decay_rate = (scaled_lr * LR_SCHEDULE[0][0] *
current_epoch / LR_SCHEDULE[0][1])
for mult, start_epoch in LR_SCHEDULE:
decay_rate = tf.where(current_epoch < start_epoch,
decay_rate, scaled_lr * mult)
return decay_rate
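# A worked example of the schedule above (derived from the code, for
# illustration only): with --base_learning_rate=0.1 and --train_batch_size=1024
# the linear scaling rule gives scaled_lr = 0.1 * 1024 / 256 = 0.4.  During
# warmup, e.g. at epoch 2.5, the rate is 0.4 * 2.5 / 5 = 0.2; after warmup it
# stays at 0.4 until epoch 30, then drops to 0.04 (epochs 30-60), 0.004
# (epochs 60-80) and 0.0004 for the remainder of training.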
def resnet_model_fn(features, labels, is_training):
"""The model_fn for ResNet to be used with TPU.
Args:
features: `Tensor` of batched images.
labels: `Tensor` of labels for the data samples
is_training: whether this is training
Returns:
train_op, logits
"""
if isinstance(features, dict):
features = features['feature']
if FLAGS.use_space_to_depth:
if FLAGS.train_batch_size // FLAGS.num_replicas > 8:
features = tf.reshape(
features, [FLAGS.image_size // 2, FLAGS.image_size // 2, 12, -1])
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
else:
features = tf.reshape(
features, [FLAGS.image_size // 2, FLAGS.image_size // 2, -1, 12])
features = tf.transpose(features, [2, 0, 1, 3]) # HWNC to NHWC
else:
if FLAGS.train_batch_size // FLAGS.num_replicas > 8:
features = tf.reshape(features,
[FLAGS.image_size, FLAGS.image_size, 3, -1])
features = tf.transpose(features, [3, 0, 1, 2]) # HWCN to NHWC
else:
features = tf.reshape(features,
[FLAGS.image_size, FLAGS.image_size, -1, 3])
      features = tf.transpose(features, [2, 0, 1, 3])  # HWNC to NHWC
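  # Note on the reshape/transpose pairs above (explanatory comment, not in the
  # original): the input pipeline appears to deliver each image batch in a
  # transposed, batch-last layout (HWCN when the per-core batch is > 8,
  # otherwise HWNC), so the transpose recovers the usual NHWC layout.  For
  # example, with --image_size=224, no space-to-depth and a per-core batch of
  # 32, a [224, 224, 3, 32] tensor becomes [32, 224, 224, 3] via perm
  # [3, 0, 1, 2].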
# Normalize the image to zero mean and unit variance.
if FLAGS.use_space_to_depth:
features -= tf.constant(MEAN_RGB, shape=[1, 1, 12], dtype=features.dtype)
features /= tf.constant(STDDEV_RGB, shape=[1, 1, 12], dtype=features.dtype)
else:
features -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=features.dtype)
features /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=features.dtype)
# This nested function allows us to avoid duplicating the logic which
# builds the network, for different values of --precision.
def build_network():
with tf.variable_scope('resnet', reuse=tf.AUTO_REUSE):
network = resnet_model.resnet_v1(
resnet_depth=FLAGS.resnet_depth,
num_classes=FLAGS.num_label_classes,
use_space_to_depth=FLAGS.use_space_to_depth,
num_replicas=FLAGS.num_replicas,
distributed_group_size=FLAGS.distributed_group_size)
return network(inputs=features, is_training=is_training)
if FLAGS.precision == 'bfloat16':
with tf.tpu.bfloat16_scope():
logits = build_network()
logits = tf.cast(logits, tf.float32)
elif FLAGS.precision == 'float32':
logits = build_network()
if not is_training:
total_correct = tf.reduce_sum(
tf.cast(
tf.equal(tf.cast(tf.argmax(logits, axis=1), labels.dtype), labels),
tf.int32))
return None, {'total_correct': tf.reshape(total_correct, [-1])}
# Calculate loss, which includes softmax cross entropy and L2 regularization.
one_hot_labels = tf.one_hot(labels, FLAGS.num_label_classes)
cross_entropy = tf.losses.softmax_cross_entropy(
logits=logits,
onehot_labels=one_hot_labels,
label_smoothing=FLAGS.label_smoothing)
# Add weight decay to the loss for non-batch-normalization variables.
if FLAGS.enable_lars:
loss = cross_entropy
else:
loss = cross_entropy + FLAGS.weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if 'batch_normalization' not in v.name
])
global_step = tf.train.get_or_create_global_step()
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
current_epoch = (tf.cast(global_step, tf.float32) / steps_per_epoch)
mlp_log.mlperf_print(
'model_bn_span',
FLAGS.distributed_group_size *
(FLAGS.train_batch_size // FLAGS.num_replicas))
if FLAGS.enable_lars:
learning_rate = 0.0
mlp_log.mlperf_print('opt_name', 'lars')
optimizer = lars_util.init_lars_optimizer(current_epoch)
else:
mlp_log.mlperf_print('opt_name', 'sgd')
learning_rate = learning_rate_schedule(current_epoch)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=FLAGS.momentum, use_nesterov=True)
optimizer = tf.tpu.CrossShardOptimizer(optimizer)
# Batch normalization requires UPDATE_OPS to be added as a dependency to
# the train operation.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.minimize(loss, global_step)
return train_op, None
def main(unused_argv):
def eval_init_fn(cur_step):
"""Executed before every eval."""
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_start',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': 4
})
def eval_finish_fn(cur_step, eval_output, summary_writer):
"""Executed after every eval."""
steps_per_epoch = FLAGS.num_train_images / FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
eval_accuracy = float(np.sum(
eval_output['total_correct'])) / FLAGS.num_eval_images
if summary_writer:
with tf.Graph().as_default():
summary_writer.add_summary(
tf.Summary(value=[
tf.Summary.Value(tag='accuracy', simple_value=eval_accuracy)
]), cur_step)
mlp_log.mlperf_print(
'eval_accuracy',
eval_accuracy,
metadata={
'epoch_num': epoch + FLAGS.iterations_per_loop // steps_per_epoch
})
mlp_log.mlperf_print(
'block_stop',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': 4
})
if eval_accuracy >= FLAGS.stop_threshold:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'success'})
return True
else:
return False
def run_finish_fn(success):
if not success:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'abort'})
mlp_log.mlperf_print('run_final', None)
low_level_runner = train_and_eval_runner.TrainAndEvalRunner(
FLAGS.iterations_per_loop, FLAGS.train_steps,
int(math.ceil(FLAGS.num_eval_images / FLAGS.eval_batch_size)),
FLAGS.num_replicas)
mlp_log.mlperf_print('cache_clear', True)
mlp_log.mlperf_print('init_start', None)
mlp_log.mlperf_print('global_batch_size', FLAGS.train_batch_size)
mlp_log.mlperf_print('lars_opt_weight_decay', FLAGS.weight_decay)
mlp_log.mlperf_print('lars_opt_momentum', FLAGS.momentum)
mlp_log.mlperf_print('submission_benchmark', 'resnet')
mlp_log.mlperf_print('submission_division', 'closed')
mlp_log.mlperf_print('submission_org', 'google')
mlp_log.mlperf_print('submission_platform', 'tpu-v3-%d' % FLAGS.num_replicas)
mlp_log.mlperf_print('submission_status', 'research')
assert FLAGS.precision == 'bfloat16' or FLAGS.precision == 'float32', (
'Invalid value for --precision flag; must be bfloat16 or float32.')
input_dtype = tf.bfloat16 if FLAGS.precision == 'bfloat16' else tf.float32
  cache_decoded_image = FLAGS.num_replicas > 2048
imagenet_train, imagenet_eval = [
imagenet_input.get_input_fn( # pylint: disable=g-complex-comprehension
FLAGS.data_dir,
is_training,
input_dtype,
FLAGS.image_size,
FLAGS.input_partition_dims is None,
cache_decoded_image=cache_decoded_image)
for is_training in [True, False]
]
low_level_runner.initialize(imagenet_train, imagenet_eval, resnet_model_fn,
FLAGS.train_batch_size, FLAGS.eval_batch_size,
FLAGS.input_partition_dims)
mlp_log.mlperf_print('train_samples', FLAGS.num_train_images)
mlp_log.mlperf_print('eval_samples', FLAGS.num_eval_images)
mlp_log.mlperf_print('init_stop', None)
mlp_log.mlperf_print('run_start', None)
low_level_runner.train_and_eval(eval_init_fn, eval_finish_fn, run_finish_fn)
if __name__ == '__main__':
app.run(main)
| apache-2.0 | 8,271,973,849,442,803,000 | 36.072727 | 80 | 0.66342 | false |
laurenbarker/SHARE | project/urls.py | 1 | 2269 | """share URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from django.conf import settings
from django.views.generic.base import RedirectView
from django.contrib.staticfiles.storage import staticfiles_storage
from revproxy.views import ProxyView
from osf_oauth2_adapter import views as osf_oauth2_adapter_views
from api.views import APIVersionRedirectView, source_icon_view
urlpatterns = [
url(r'^admin/', admin.site.urls),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/v2/', include('api.urls', namespace='api')),
url(r'^api/(?P<path>(?!v\d+).*)', APIVersionRedirectView.as_view()),
url(r'^api/v1/', include('api.urls_v1', namespace='api_v1')),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
url(r'^accounts/social/login/cancelled/', osf_oauth2_adapter_views.login_errored_cancelled),
url(r'^accounts/social/login/error/', osf_oauth2_adapter_views.login_errored_cancelled),
url(r'^accounts/', include('allauth.urls')),
url(r'^$', RedirectView.as_view(url='{}/'.format(settings.EMBER_SHARE_PREFIX))),
url(r'^favicon.ico$', RedirectView.as_view(
url=staticfiles_storage.url('favicon.ico'),
permanent=False
), name='favicon'),
url(r'^icons/(?P<source_name>[^/]+).ico$', source_icon_view, name='source_icon'),
]
if settings.DEBUG:
# import debug_toolbar
urlpatterns += [
# url(r'^__debug__/', include(debug_toolbar.urls)),
url(r'^(?P<path>{}/.*)$'.format(settings.EMBER_SHARE_PREFIX), ProxyView.as_view(upstream=settings.EMBER_SHARE_URL)),
]
| apache-2.0 | -4,742,425,690,949,599,000 | 44.38 | 124 | 0.690172 | false |
xigt/lgid | lgid/util.py | 1 | 12489 |
"""
Utility functions for language identification tasks
"""
from collections import defaultdict, namedtuple
from freki.serialize import FrekiDoc
import unicodedata
import re
import os
import logging
import unidecode
def read_language_table(path):
"""
Read language table at *path* and return the {name:[code]} mapping
Language names are normalized to remove diacritics, parentheticals,
and excess spacing. The result is lowercased to avoid hash-misses
based on capitalization differences.
Args:
path: file path to a tab-separated language table where the
first column is the language name and following columns
contain language codes for that name
Returns:
dictionary mapping normalized language names to lists of codes
"""
logging.info('Reading language table: ' + path)
table = defaultdict(list)
for line in open(path, encoding='utf-8'):
if line.strip():
name, codes = line.rstrip().split('\t', 1)
codes = codes.split()
norm = unicode_normalize_characters(name) # remove diacritics
norm = re.sub(r' \([^)]*\)', '', norm) # remove parentheticals
norm = re.sub(r'\s+', ' ', norm).strip() # normalize spacing
norm = norm.lower() # lowercase
table[norm].extend(codes)
for norm in table:
table[norm] = sorted(set(table[norm])) # remove duplicates
logging.info(str(len(table)) + ' language names in table')
return table
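# Illustrative example (not part of the original module): a table line such as
#
#     Español (Spain)\tspa es
#
# is normalized to the key 'espanol' (diacritics, the parenthetical, extra
# spacing and capitalization removed), so read_language_table() would return
# {'espanol': ['es', 'spa']} for it, with duplicate codes dropped and the
# remainder sorted.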
def unicode_normalize_characters(s):
"""
Apply a unicode transformation to normalize accented characters to
their near-ASCII equivalent.
"""
return ''.join(c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c))
def hard_normalize_characters(s):
"""
Apply a transformation to replace Unicode characters with
an ASCII representation.
"""
return unidecode.unidecode(s)
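# Example of the difference between the two normalizers (illustration only):
# unicode_normalize_characters('Māori') drops the combining macron left by the
# NFKD decomposition and yields 'Maori'.  A letter with no Unicode
# decomposition, such as the 'Ł' in 'Łacinka', survives that step, whereas
# hard_normalize_characters('Łacinka') transliterates it to 'Lacinka' via
# unidecode.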
def read_odin_language_model(pairs, config, gram_type):
"""
Read an ODIN language model for a (language name, ISO code) pair.
Args:
pairs: a list of (name, code) pairs to construct models for
config: model parameters
gram_type: what type of gram to use - 'character', 'word', or 'morpheme'
Returns:
list of tuples of ngrams, or None if no language model exists for
the given name-ISO pairing
"""
if gram_type != 'character' and gram_type != 'word' and gram_type != 'morpheme':
raise ValueError("argument 'gram_type' not 'character', 'word', or 'morpheme'")
all_lms = {}
for lang_name, iso_code in pairs:
lang_name = lang_name.replace('/', '-')
norm_name = hard_normalize_characters(lang_name)
base_path = config['locations']['odin-language-model']
code_only = config['parameters']['code-only-odin-lms'] == 'yes'
if code_only:
file_basename = iso_code
else:
file_basename = '{}_{}'.format(iso_code, norm_name)
if gram_type == 'character':
file_name = '{}/{}.char'.format(base_path, file_basename)
n = int(config['parameters']['character-n-gram-size'])
elif gram_type == 'word':
file_name = '{}/{}.word'.format(base_path, file_basename)
n = int(config['parameters']['word-n-gram-size'])
else:
file_name = '{}/{}.morph'.format(base_path, file_basename)
n = int(config['parameters']['morpheme-n-gram-size'])
try:
with open(file_name, encoding='utf8') as f:
lines = f.readlines()
except FileNotFoundError:
continue
lm = set()
for line in lines:
if line.strip() == '':
continue
            line = line.split()[0] if gram_type == 'character' else line.split()[:-1]
if len(line) <= n:
feature = tuple(line)
lm.add(feature)
all_lms[(lang_name, iso_code)] = lm
return all_lms
def read_crubadan_language_model(pairs, config, gram_type):
"""
Read a Crubadan language model for a (language name, ISO code) pair.
Args:
pairs: a list of (name, code) pairs to construct models for
config: model parameters
gram_type: what type of gram to use - 'character' or 'word'
Returns:
list of tuples of ngrams, or None if no language model exists for
the given name-ISO pairing
"""
if gram_type != 'character' and gram_type != 'word':
raise ValueError("argument 'gram_type' not 'character' or 'word'")
import csv
base_path = config['locations']['crubadan-language-model']
table = open(config['locations']['crubadan-directory-index'], encoding='utf8')
reader = csv.reader(table)
header = next(reader) # discard header row
dir_map = {}
for row in reader:
name = row[0]
iso = row[1]
directory = row[2].strip()
dir_map[(name, iso)] = directory
table.close()
if gram_type == 'character':
file_basename = {
3: "-chartrigrams.txt"
}.get(int(config['parameters']['crubadan-char-size']))
else:
file_basename = {
1: "-words.txt",
2: "-wordbigrams.txt"
}.get(int(config['parameters']['crubadan-word-size']))
all_lms = {}
for lang_name, iso_code in pairs:
try:
this_dir = dir_map[(lang_name, iso_code)]
crubadan_code = this_dir.split("_")[1]
with open("{}/{}/{}{}".format(base_path, this_dir, crubadan_code, file_basename), encoding='utf8') as f:
lines = f.readlines()
except (FileNotFoundError, KeyError, IndexError):
continue
lm = set()
for line in lines:
if line.strip() == '':
continue
line = line.split()[:-1]
feature = tuple(line[0]) if gram_type == 'character' else tuple(line)
lm.add(feature)
all_lms[(lang_name, iso_code)] = lm
return all_lms
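# A minimal sketch (commented out, not part of the original module) of how the
# language-model dictionaries returned above can be consumed: each value is a
# set of n-gram tuples, so one simple comparison between a line of text and a
# model is plain n-gram overlap.  The helper name and the scoring choice below
# are assumptions for illustration, not lgid's actual feature computation.
#
#     def character_ngram_overlap(text, lm, n=3):
#         grams = {tuple(text[i:i + n]) for i in range(len(text) - n + 1)}
#         if not grams:
#             return 0.0
#         return len(grams & lm) / len(grams)
#
#     # e.g. score = character_ngram_overlap(line_text, all_lms[(name, iso)])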
def encode_instance_id(doc_id, span_id, line_no, lang_name, lang_code):
return (doc_id, span_id, line_no, lang_name, lang_code)
def decode_instance_id(s):
doc_id, span_id, line_no, lang_name, lang_code = s.id
return doc_id, span_id, int(line_no), lang_name, lang_code
def spans(doc):
"""
Scan the FrekiDoc *doc* and yield the IGT spans found
This requires the documents to have the span_id attribute from
IGT detection.
"""
span = []
span_id = None
for line in doc.lines():
new_span_id = line.attrs.get('span_id')
if new_span_id != span_id and span:
yield span
span = []
span_id = new_span_id
if new_span_id is not None:
span.append(line)
span_id = new_span_id
if span:
yield span
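# Illustrative example: for a document whose lines carry span_id values
# [s1, s1, None, s2, s2], spans() yields two lists (the first two lines, then
# the last two); lines that fall outside any span are never emitted.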
def find_common_codes(infiles, config):
"""
Build the res file showing the most common code for each language
:param infiles: list of freki filepaths
:param config: config object
:return: None, writes to most-common-codes location
"""
dialect_count = defaultdict(lambda: defaultdict(int))
locs = config['locations']
lgtable = {}
if locs['language-table']:
lgtable = read_language_table(locs['language-table'])
for infile in infiles:
doc = FrekiDoc.read(infile)
for span in spans(doc):
if not span:
continue
for line in span:
if 'L' in line.tag:
lgname = line.attrs.get('lang_name', '???').lower()
lgcode = line.attrs.get('lang_code', 'und')
if len(lgcode.split(':')) > 1:
parts = lgcode.split(':')
for part in parts:
dialect_count[lgname][part] += 1
else:
dialect_count[lgname][lgcode] += 1
out = open(locs['most-common-codes'], 'w', encoding='utf8')
for key in lgtable:
a_line = ''
if len(lgtable[key]) == 1:
a_line = key + '\t'
a_line += lgtable[key][0]
a_line += '\n'
if dialect_count[key]:
a_line = key + '\t'
a_line += sorted(dialect_count[key], key=lambda x: dialect_count[key][x], reverse=True)[0]
a_line += '\n'
out.write(a_line)
def generate_language_name_mapping(config):
"""
    Generates mappings from all words appearing in the language table
to unique ints. Writes the mappings to a file.
Also generates and writes to a file mappings from each language
name to the sequence of ints making up its name.
Also generates and writes to a file mappings from each word appearing
in the language table to a list of languages where that word appears
in the name.
Args:
config: parameters/settings
"""
normcaps = {
'upper': str.upper,
'lower': str.lower,
'title': str.title
}.get(config['parameters'].get('mention-capitalization', 'default'), str)
locs = config['locations']
lgtable = {}
if locs['language-table']:
lgtable = read_language_table(locs['language-table'])
words = set()
word_associations = {}
for lang in lgtable:
for word in lang.split():
words.add(normcaps(word.strip()))
word_mappings = {}
with open(locs['word-index'], 'w', encoding='utf8') as f:
i = 10000 # so that all words will have a mapping 5 digits long
for word in words:
f.write('{}\t{}\n'.format(word, i))
word_mappings[word] = i
i += 1
lang_mappings = {}
with open(locs['language-index'], 'w', encoding='utf8') as f:
for lang in lgtable:
index = ''
for word in lang.split():
index += str(word_mappings[normcaps(word.strip())])
f.write('{}\t{}\n'.format(normcaps(lang), index))
lang_mappings[lang] = index
word_associations = {}
with open(locs['word-language-mapping'], 'w', encoding='utf8') as f:
for lang in lgtable:
for word in lang.split():
word = normcaps(word.strip())
if word in word_associations:
word_associations[word].append(lang_mappings[lang])
else:
word_associations[word] = [lang_mappings[lang]]
for word in words:
f.write('{}\t{}\n'.format(word, ','.join(word_associations[word])))
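# Sketch of the three files written above (the concrete integers depend on set
# iteration order, so the values shown are invented): word-index maps every
# word to a unique five-digit id, e.g. "old\t10007" and "norse\t10042";
# language-index concatenates those ids for each name, e.g.
# "old norse\t1000710042"; and word-language-mapping lists, for every word,
# the ids of all languages whose name contains it, e.g.
# "old\t1000710042,1000710055".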
LangTable = namedtuple(
'LangTable',
('int_to_lang',
'lang_to_int',
'int_to_word',
'word_to_int',
'word_to_lang')
)
def read_language_mapping_table(config):
"""
Reads from a file mappings from language names to int sequences
and builds a dictionary mapping from ints to language names and
one mapping from language names to ints.
Args:
config: parameters/settings
Returns:
a LangTable object containing all the mapping tables
"""
normcaps = {
'upper': str.upper,
'lower': str.lower,
'title': str.title
}.get(config['parameters'].get('mention-capitalization', 'default'), str)
locs = config['locations']
lang_to_int = {}
int_to_lang = {}
word_to_int = {}
int_to_word = {}
word_to_lang = {}
if not os.path.exists(locs['language-index']) or not os.path.exists(locs['word-index']) or not os.path.exists(locs['word-language-mapping']):
generate_language_name_mapping(config)
with open(locs['language-index'], encoding='utf8') as f:
for line in f.readlines():
line = line.split('\t')
lang_to_int[normcaps(line[0])] = line[1].strip()
int_to_lang[line[1].strip()] = normcaps(line[0])
with open(locs['word-index'], encoding='utf8') as f:
for line in f.readlines():
line = line.split('\t')
word_to_int[normcaps(line[0])] = line[1].strip()
int_to_word[line[1].strip()] = normcaps(line[0])
with open(locs['word-language-mapping'], encoding='utf8') as f:
for line in f.readlines():
line = line.split('\t')
word_to_lang[normcaps(line[0]).strip()] = line[1].strip().split(',')
return LangTable(int_to_lang, lang_to_int, int_to_word, word_to_int, word_to_lang)
| mit | 8,431,576,550,482,798,000 | 34.180282 | 145 | 0.575707 | false |
lhillber/qops | figure2.py | 1 | 5048 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from figure3 import select, ket, exp
from matrix import ops
from measures import local_entropies_from_rhos, local_exp_vals_from_rhos
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib import rc
rc("text", usetex=True)
font = {"size": 11, "weight": "normal"}
mpl.rc(*("font",), **font)
mpl.rcParams["pdf.fonttype"] = 42
mpl.rcParams["text.latex.preamble"] = [
r"\usepackage{amsmath}",
r"\usepackage{sansmath}", # sanserif math
r"\sansmath",
]
if __name__ == "__main__":
names = {
"c1_f0": {"name": ket("010"), "ls": "-", "c": "C5", "m": "v"},
"exp-z": {"name": exp("\hat{\sigma_j}^z"), "ls": "-", "c": "C5", "m": "v"},
"exp-x": {"name": exp("\hat{\sigma_j}^x"), "ls": "-", "c": "C5", "m": "v"},
"s-2": {"name": " $s^{(2)}_j$", "ls": "-", "c": "C5", "m": "v"},
}
cmaps = ["inferno_r", "inferno"]
plot_fname = "figures/figure2/figure2_V5.pdf"
fig = plt.figure(figsize=(4.75, 3.7))
Skey = ["3.6", "3.13", "3.14", "5.4", "5.2"]
measures = ["exp-z", "s-2"]
IC = "c1_f0"
L = 18
T = (L - 1) * 3 + 1 # plot ylim
letts1 = [
r"$\mathrm{A}$",
r"$\mathrm{C}$",
r"$\mathrm{E}$",
r"$\mathrm{G}$",
r"$\mathrm{I}$",
]
letts2 = [
r"$\mathrm{B}$",
r"$\mathrm{D}$",
r"$\mathrm{F}$",
r"$\mathrm{H}$",
r"$\mathrm{J}$",
]
clett1 = ["w", "w", "w", "w", "w"]
clett2 = ["k", "k", "k", "w", "k"]
letts = [letts1, letts2]
cletts = [clett1, clett2]
for row, (meas, letti, cli) in enumerate(zip(measures, letts, cletts)):
grid = ImageGrid(
fig,
int("21" + str(1 + row)),
nrows_ncols=(1, 5),
direction="row",
axes_pad=0.1,
add_all=True,
cbar_mode="single",
cbar_location="right",
cbar_size="20%",
cbar_pad=0.05,
)
for col, (S, lett, cl) in enumerate(zip(Skey, letti, cli)):
N, S = map(int, S.split("."))
ax = grid[col]
if N == 3:
sim = select(L=L, S=S, IC=IC, V="H", BC="0")
if sim is None:
print("No sim!")
continue
S = sim["S"]
L = sim["L"]
IC = sim["IC"]
h5file = sim["h5file"]
if meas[0] == "e":
ticks = [-1, 1]
ticklabels = ["↑", "↓"]
else:
ticks = [0, 1]
ticklabels = ["$0$","$1$"]
vmin, vmax = ticks
d = h5file[meas]
elif N == 5:
der = "/home/lhillber/documents/research/cellular_automata/qeca/qops"
der = os.path.join(der, f"qca_output/hamiltonian/rule{S}/rho_i.npy")
one_site = np.load(der)
one_site = one_site.reshape(2000, 22, 2, 2)
one_site = one_site[::, 2:-2, :, :]
T5, L5, *_ = one_site.shape
d = np.zeros((T5, L5))
ti = 0
for t, rhoi in enumerate(one_site):
if t % 10 == 0:
if meas == "exp-z":
d[ti, :] = local_exp_vals_from_rhos(rhoi, ops["Z"])
elif meas == "s-2":
d[ti, :] = local_entropies_from_rhos(rhoi, order=2)
ti += 1
I = ax.imshow(
d[0:T],
origin="lower",
interpolation=None,
cmap=cmaps[row],
vmin=vmin,
vmax=vmax,
)
ax.cax.colorbar(I)
ax.cax.set_yticks(ticks)
ax.cax.set_yticklabels(ticklabels)
ax.set_xticks([0, 8, 17])
ax.set_yticks([i * (L - 1) for i in range(4)])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.text(0.5, 46, lett, color=cl, family="sans-serif", weight="bold")
if col == len(Skey) - 1:
ax.cax.text(
1.6,
0.5,
names[meas]["name"],
rotation=0,
transform=ax.transAxes,
ha="left",
va="center",
)
if row == 0 and col < 3:
ax.set_title(r"$T_{%d}$" % S)
elif row == 0 and col > 2:
ax.set_title(r"${F_{%d}}$" % S)
ax.tick_params(direction="out")
grid[0].set_yticklabels(["$"+str(i * (L - 1))+"$" for i in range(4)])
grid[0].set_xticklabels(["$0$", "$8$", "$17$"])
grid[0].set_xlabel("$j$", labelpad=0)
grid[0].set_ylabel("$t$", labelpad=0)
fig.subplots_adjust(hspace=0.1, left=0.05, top=0.93)
plt.savefig(plot_fname, dpi=300)
print("plot saved to ", plot_fname)
| mit | -4,930,191,000,067,315,000 | 32.184211 | 85 | 0.419112 | false |
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/python-com-scrapy/Lib/site-packages/w3lib/encoding.py | 1 | 10288 | # -*- coding: utf-8 -*-
"""
Functions for handling encoding of web pages
"""
import re, codecs, encodings
from sys import version_info
_HEADER_ENCODING_RE = re.compile(r'charset=([\w-]+)', re.I)
def http_content_type_encoding(content_type):
"""Extract the encoding in the content-type header
>>> import w3lib.encoding
>>> w3lib.encoding.http_content_type_encoding("Content-Type: text/html; charset=ISO-8859-4")
'iso8859-4'
"""
if content_type:
match = _HEADER_ENCODING_RE.search(content_type)
if match:
return resolve_encoding(match.group(1))
# regexp for parsing HTTP meta tags
_TEMPLATE = r'''%s\s*=\s*["']?\s*%s\s*["']?'''
_SKIP_ATTRS = '''(?:\\s+
[^=<>/\\s"'\x00-\x1f\x7f]+ # Attribute name
(?:\\s*=\\s*
(?: # ' and " are entity encoded (', "), so no need for \', \"
'[^']*' # attr in '
|
"[^"]*" # attr in "
|
[^'"\\s]+ # attr having no ' nor "
))?
)*?''' # must be used with re.VERBOSE flag
_HTTPEQUIV_RE = _TEMPLATE % ('http-equiv', 'Content-Type')
_CONTENT_RE = _TEMPLATE % ('content', r'(?P<mime>[^;]+);\s*charset=(?P<charset>[\w-]+)')
_CONTENT2_RE = _TEMPLATE % ('charset', r'(?P<charset2>[\w-]+)')
_XML_ENCODING_RE = _TEMPLATE % ('encoding', r'(?P<xmlcharset>[\w-]+)')
# check for meta tags, or xml decl. and stop search if a body tag is encountered
_BODY_ENCODING_PATTERN = r'<\s*(?:meta%s(?:(?:\s+%s|\s+%s){2}|\s+%s)|\?xml\s[^>]+%s|body)' % (
_SKIP_ATTRS, _HTTPEQUIV_RE, _CONTENT_RE, _CONTENT2_RE, _XML_ENCODING_RE)
_BODY_ENCODING_STR_RE = re.compile(_BODY_ENCODING_PATTERN, re.I | re.VERBOSE)
_BODY_ENCODING_BYTES_RE = re.compile(_BODY_ENCODING_PATTERN.encode('ascii'),
re.I | re.VERBOSE)
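# Examples of declarations the pattern above is intended to match
# (illustrative):
#
#     <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
#     <meta charset="utf-8">
#     <?xml version="1.0" encoding="iso-8859-1"?>
#
# Because <body> is an alternative in the pattern, the search effectively
# stops at the body tag and later declarations are ignored.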
def html_body_declared_encoding(html_body_str):
'''Return the encoding specified in meta tags in the html body,
or ``None`` if no suitable encoding was found
>>> import w3lib.encoding
>>> w3lib.encoding.html_body_declared_encoding(
... """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
... "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
... <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
... <head>
... <title>Some title</title>
... <meta http-equiv="content-type" content="text/html;charset=utf-8" />
... </head>
... <body>
... ...
... </body>
... </html>""")
'utf-8'
>>>
'''
# html5 suggests the first 1024 bytes are sufficient, we allow for more
chunk = html_body_str[:4096]
if isinstance(chunk, bytes):
match = _BODY_ENCODING_BYTES_RE.search(chunk)
else:
match = _BODY_ENCODING_STR_RE.search(chunk)
if match:
encoding = match.group('charset') or match.group('charset2') \
or match.group('xmlcharset')
if encoding:
return resolve_encoding(encoding)
# Default encoding translation
# this maps canonicalized encodings to target encodings
# see http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#character-encodings-0
# in addition, gb18030 supersedes gb2312 & gbk
# the keys are converted using _c18n_encoding and in sorted order
DEFAULT_ENCODING_TRANSLATION = {
'ascii': 'cp1252',
'big5': 'big5hkscs',
'euc_kr': 'cp949',
'gb2312': 'gb18030',
'gb_2312_80': 'gb18030',
'gbk': 'gb18030',
'iso8859_11': 'cp874',
'iso8859_9': 'cp1254',
'latin_1': 'cp1252',
'macintosh': 'mac_roman',
'shift_jis': 'cp932',
'tis_620': 'cp874',
'win_1251': 'cp1251',
'windows_31j': 'cp932',
'win_31j': 'cp932',
'windows_874': 'cp874',
'win_874': 'cp874',
'x_sjis': 'cp932',
'zh_cn': 'gb18030'
}
def _c18n_encoding(encoding):
"""Cannonicalize an encoding name
This performs normalization and translates aliases using python's
encoding aliases
"""
normed = encodings.normalize_encoding(encoding).lower()
return encodings.aliases.aliases.get(normed, normed)
def resolve_encoding(encoding_alias):
"""Return the encoding that `encoding_alias` maps to, or ``None``
if the encoding cannot be interpreted
>>> import w3lib.encoding
>>> w3lib.encoding.resolve_encoding('latin1')
'cp1252'
>>> w3lib.encoding.resolve_encoding('gb_2312-80')
'gb18030'
>>>
"""
c18n_encoding = _c18n_encoding(encoding_alias)
translated = DEFAULT_ENCODING_TRANSLATION.get(c18n_encoding, c18n_encoding)
try:
return codecs.lookup(translated).name
except LookupError:
return None
_BOM_TABLE = [
(codecs.BOM_UTF32_BE, 'utf-32-be'),
(codecs.BOM_UTF32_LE, 'utf-32-le'),
(codecs.BOM_UTF16_BE, 'utf-16-be'),
(codecs.BOM_UTF16_LE, 'utf-16-le'),
(codecs.BOM_UTF8, 'utf-8')
]
_FIRST_CHARS = set(c[0] for (c, _) in _BOM_TABLE)
def read_bom(data):
r"""Read the byte order mark in the text, if present, and
return the encoding represented by the BOM and the BOM.
If no BOM can be detected, ``(None, None)`` is returned.
>>> import w3lib.encoding
>>> w3lib.encoding.read_bom(b'\xfe\xff\x6c\x34')
('utf-16-be', '\xfe\xff')
>>> w3lib.encoding.read_bom(b'\xff\xfe\x34\x6c')
('utf-16-le', '\xff\xfe')
>>> w3lib.encoding.read_bom(b'\x00\x00\xfe\xff\x00\x00\x6c\x34')
('utf-32-be', '\x00\x00\xfe\xff')
>>> w3lib.encoding.read_bom(b'\xff\xfe\x00\x00\x34\x6c\x00\x00')
('utf-32-le', '\xff\xfe\x00\x00')
>>> w3lib.encoding.read_bom(b'\x01\x02\x03\x04')
(None, None)
>>>
"""
# common case is no BOM, so this is fast
if data and data[0] in _FIRST_CHARS:
for bom, encoding in _BOM_TABLE:
if data.startswith(bom):
return encoding, bom
return None, None
# Python decoder doesn't follow unicode standard when handling
# bad utf-8 encoded strings. see http://bugs.python.org/issue8271
codecs.register_error('w3lib_replace', lambda exc: (u'\ufffd', exc.end))
def to_unicode(data_str, encoding):
"""Convert a str object to unicode using the encoding given
Characters that cannot be converted will be converted to ``\\ufffd`` (the
unicode replacement character).
"""
return data_str.decode(encoding, 'replace' if version_info[0:2] >= (3, 3) else 'w3lib_replace')
def html_to_unicode(content_type_header, html_body_str,
default_encoding='utf8', auto_detect_fun=None):
r'''Convert raw html bytes to unicode
This attempts to make a reasonable guess at the content encoding of the
html body, following a similar process to a web browser.
It will try in order:
* http content type header
* BOM (byte-order mark)
* meta or xml tag declarations
* auto-detection, if the `auto_detect_fun` keyword argument is not ``None``
* default encoding in keyword arg (which defaults to utf8)
If an encoding other than the auto-detected or default encoding is used,
overrides will be applied, converting some character encodings to more
suitable alternatives.
If a BOM is found matching the encoding, it will be stripped.
The `auto_detect_fun` argument can be used to pass a function that will
sniff the encoding of the text. This function must take the raw text as an
argument and return the name of an encoding that python can process, or
None. To use chardet, for example, you can define the function as::
auto_detect_fun=lambda x: chardet.detect(x).get('encoding')
or to use UnicodeDammit (shipped with the BeautifulSoup library)::
auto_detect_fun=lambda x: UnicodeDammit(x).originalEncoding
If the locale of the website or user language preference is known, then a
better default encoding can be supplied.
If `content_type_header` is not present, ``None`` can be passed signifying
that the header was not present.
This method will not fail, if characters cannot be converted to unicode,
``\\ufffd`` (the unicode replacement character) will be inserted instead.
Returns a tuple of ``(<encoding used>, <unicode_string>)``
Examples:
>>> import w3lib.encoding
>>> w3lib.encoding.html_to_unicode(None,
... b"""<!DOCTYPE html>
... <head>
... <meta charset="UTF-8" />
... <meta name="viewport" content="width=device-width" />
... <title>Creative Commons France</title>
... <link rel='canonical' href='http://creativecommons.fr/' />
... <body>
... <p>Creative Commons est une organisation \xc3\xa0 but non lucratif
... qui a pour dessein de faciliter la diffusion et le partage des oeuvres
... tout en accompagnant les nouvelles pratiques de cr\xc3\xa9ation \xc3\xa0 l\xe2\x80\x99\xc3\xa8re numerique.</p>
... </body>
... </html>""")
('utf-8', u'<!DOCTYPE html>\n<head>\n<meta charset="UTF-8" />\n<meta name="viewport" content="width=device-width" />\n<title>Creative Commons France</title>\n<link rel=\'canonical\' href=\'http://creativecommons.fr/\' />\n<body>\n<p>Creative Commons est une organisation \xe0 but non lucratif\nqui a pour dessein de faciliter la diffusion et le partage des oeuvres\ntout en accompagnant les nouvelles pratiques de cr\xe9ation \xe0 l\u2019\xe8re numerique.</p>\n</body>\n</html>')
>>>
'''
enc = http_content_type_encoding(content_type_header)
bom_enc, bom = read_bom(html_body_str)
if enc is not None:
# remove BOM if it agrees with the encoding
if enc == bom_enc:
html_body_str = html_body_str[len(bom):]
elif enc == 'utf-16' or enc == 'utf-32':
# read endianness from BOM, or default to big endian
# tools.ietf.org/html/rfc2781 section 4.3
if bom_enc is not None and bom_enc.startswith(enc):
enc = bom_enc
html_body_str = html_body_str[len(bom):]
else:
enc += '-be'
return enc, to_unicode(html_body_str, enc)
if bom_enc is not None:
return bom_enc, to_unicode(html_body_str[len(bom):], bom_enc)
enc = html_body_declared_encoding(html_body_str)
if enc is None and (auto_detect_fun is not None):
enc = auto_detect_fun(html_body_str)
if enc is None:
enc = default_encoding
return enc, to_unicode(html_body_str, enc)
| gpl-3.0 | 4,184,339,390,128,500,700 | 36.547445 | 483 | 0.630735 | false |
xialingxiao/andokaelk | roles/image/files/oauth2/oauth2_proxy/oauth2_proxy.py | 1 | 2932 | #!/usr/bin/env python3
################################################################################
# Abstract:
# A wrapper for oauth2_proxy
#
# Description:
#
#
# Copyright (c) 2015 Dragon Law
# Project: Dragon Law Data Manager
# Creation: Lingxiao Xia
# Creation Date: 18/05/2015
################################################################################
import os, subprocess, time, argparse, string, random, fileinput, sys, signal
from datetime import datetime
def cookie_id_generator(size=32, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):
return ''.join(random.choice(chars) for _ in range(size))
class oauth2_proxy_handler():
def __init__(self,proxy,configFile,logFile,timeOut):
self.proxy = proxy
self.configFile = configFile
self.logFile = logFile
self.timeOut = timeOut
def cookie_setter(self):
session_id = ''
with fileinput.input(self.configFile,inplace=1) as f:
for line in f:
if 'cookie_secret = ' in line:
session_id = cookie_id_generator()
line = 'cookie_secret = "'+session_id+'"\n'
sys.stdout.write(line)
return session_id
def run(self):
while True:
session_id = self.cookie_setter()
with open(self.logFile, 'a+') as log:
rightNow = datetime.now()
timestamp = str(rightNow.year).zfill(4)+'/'+str(rightNow.month).zfill(2)+'/'+str(rightNow.day).zfill(2)+' '+str(rightNow.hour).zfill(2)+':'+str(rightNow.minute).zfill(2)+':'+str(rightNow.second).zfill(2)
log.write(timestamp+' Logging session with id: '+session_id+'\n')
log.flush()
self.proc = subprocess.Popen(self.proxy+' -config="'+self.configFile+'"',shell=True, stdout=log, stderr=log, preexec_fn=os.setsid)
time.sleep(self.timeOut)
os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM)
self.proc.terminate()
self.proc.wait()
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proxy', metavar="PROXY", nargs = '?', default ='', help = "Location of oauth2_proxy binary.")
parser.add_argument('configFile', metavar="CFG_FILE", nargs = '?', default ='', help = "Configuration file for oauth2_proxy")
parser.add_argument('logFile', metavar="LOG_FILE", nargs = '?', default ='', help = "Log file to write to for oauth2_proxy")
parser.add_argument('sessionTimeOut', metavar="TIMEOUT", nargs = '?', type= int , default = 86400, help = "Integer session time-out length in seconds.")
args = parser.parse_args()
oauth2_proxy_handler(args.proxy,args.configFile,args.logFile,args.sessionTimeOut).run()
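# Example invocation (paths and timeout are placeholders, not part of the
# original script):
#
#     python3 oauth2_proxy.py /usr/local/bin/oauth2_proxy \
#         /etc/oauth2_proxy.cfg /var/log/oauth2_proxy_wrapper.log 86400
#
# which rewrites cookie_secret in the config file and restarts oauth2_proxy
# every 86400 seconds (24 hours).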
| mit | -2,359,374,295,190,575,600 | 40.295775 | 219 | 0.55764 | false |
evandromr/python_scitools | lombscargle.py | 1 | 2281 | #!/bin/env python
import numpy as np
import scipy.signal as ss
import astropy.io.fits as fits
import matplotlib.pyplot as plt
inpt = str(raw_input("File: "))
lc = fits.open(inpt)
bin = float(raw_input("bin size (or camera resolution): "))
# Converting to native-endian float64 arrays is necessary for the lombscargle
# function (FITS data is stored big-endian)
time = np.array(lc[1].data["TIME"], dtype='float64')
time -= time.min()
rate = np.array(lc[1].data["RATE"], dtype='float64')
# Exclude NaN and negative values -------------------------
print ''
print 'Excluding nan and negative values...'
print ''
exclude = []
for i in xrange(len(rate)):
if rate[i] > 0:
pass
else:
exclude.append(i)
exclude = np.array(exclude)
nrate = np.delete(rate, exclude)
ntime = np.delete(time, exclude)
#-----------------------------------------------------------
# normalize rate array
mean = nrate.mean()
nrate -= nrate.mean()
# normalization factor for the periodogram (number of samples)
norm = ntime.shape[0]
# duration of observation
interval = time.max()-time.min()
# minimum frequency limited to 0.01/T
freqmin = 0.01/interval
# maximium Nyquist frequency limited by time resolution
freqmax = 1.0/(2.0*bin)
# size of the array of frequencies
nint = 10*len(nrate)
# Frequency array
freqs = np.linspace(freqmin, freqmax, nint)
# scipy.signal.lombscargle uses angular frequencies
afreqs = 2.0*np.pi*freqs
print 'f_max = ', max(freqs)
print 'f_min = ', min(freqs)
print "T_obs =", interval
print "N_points = ", norm
print "N_freqs = ", nint
# The periodogram itself
pgram = ss.lombscargle(ntime, nrate, afreqs)
# Normalize pgram to signal amplitude (same units as nrate)
pnorm = np.sqrt(4.0*(pgram/norm))
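# Sanity check for this normalization (illustrative, not part of the original
# script): a pure sinusoid of amplitude A sampled at the same times should
# produce a peak of height ~A, e.g.
#
#     test = 2.5 * np.sin(2.0 * np.pi * 0.01 * ntime)
#     ptest = ss.lombscargle(ntime, test, afreqs)
#     # np.sqrt(4.0 * ptest / norm).max() should be close to 2.5,
#     # provided 0.01 Hz lies inside [freqmin, freqmax]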
# # Plot lightcurve on top panel
# plt.subplot(2, 1, 1)
# plt.plot(ntime, nrate, 'bo-')
# plt.xlabel('Tempo (s)', fontsize=12)
# plt.ylabel('Cts. s$^{{-1}}$', fontsize=12)
# plt.xlim(time.min(), time.max())
#
# # Plot powerspectrum on bottom panel
# plt.subplot(2, 1, 2)
plt.plot(freqs, pnorm, 'b-',
         label='T$_{{peak}}$ = {0:.0f} s'.format(1.0/freqs[np.argmax(pnorm)]))
plt.xlabel('Frequency (Hz)', fontsize=12)
plt.ylabel('Power', fontsize=12)
plt.xlim(min(freqs), max(freqs))
plt.legend(loc=1)
# save and show plot
plt.savefig('lombscargle_tests.pdf', bbox_width='tight', format='pdf',
orientation='landscape')
plt.show()
| mit | 2,129,934,250,010,130,400 | 24.344444 | 73 | 0.658483 | false |
huzq/scikit-learn | examples/release_highlights/plot_release_highlights_0_22_0.py | 1 | 10224 | """
========================================
Release Highlights for scikit-learn 0.22
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.22, which comes
with many bug fixes and new features! We detail below a few of the major
features of this release. For an exhaustive list of all the changes, please
refer to the :ref:`release notes <changes_0_22>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# New plotting API
# ----------------
#
# A new plotting API is available for creating visualizations. This new API
# allows for quickly adjusting the visuals of a plot without involving any
# recomputation. It is also possible to add different plots to the same
# figure. The following example illustrates :class:`~metrics.plot_roc_curve`,
# but other plots utilities are supported like
# :class:`~inspection.plot_partial_dependence`,
# :class:`~metrics.plot_precision_recall_curve`, and
# :class:`~metrics.plot_confusion_matrix`. Read more about this new API in the
# :ref:`User Guide <visualizations>`.
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import plot_roc_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
svc = SVC(random_state=42)
svc.fit(X_train, y_train)
rfc = RandomForestClassifier(random_state=42)
rfc.fit(X_train, y_train)
svc_disp = plot_roc_curve(svc, X_test, y_test)
rfc_disp = plot_roc_curve(rfc, X_test, y_test, ax=svc_disp.ax_)
rfc_disp.figure_.suptitle("ROC curve comparison")
plt.show()
# %%
# Stacking Classifier and Regressor
# ---------------------------------
# :class:`~ensemble.StackingClassifier` and
# :class:`~ensemble.StackingRegressor`
# allow you to have a stack of estimators with a final classifier or
# a regressor.
# Stacked generalization consists in stacking the output of individual
# estimators and use a classifier to compute the final prediction. Stacking
# allows to use the strength of each individual estimator by using their output
# as input of a final estimator.
# Base estimators are fitted on the full ``X`` while
# the final estimator is trained using cross-validated predictions of the
# base estimators using ``cross_val_predict``.
#
# Read more in the :ref:`User Guide <stacking>`.
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True)
estimators = [
('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
('svr', make_pipeline(StandardScaler(),
LinearSVC(random_state=42)))
]
clf = StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression()
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=42
)
clf.fit(X_train, y_train).score(X_test, y_test)
# %%
# Permutation-based feature importance
# ------------------------------------
#
# The :func:`inspection.permutation_importance` can be used to get an
# estimate of the importance of each feature, for any fitted estimator:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
X, y = make_classification(random_state=0, n_features=5, n_informative=3)
feature_names = np.array([f'x_{i}' for i in range(X.shape[1])])
rf = RandomForestClassifier(random_state=0).fit(X, y)
result = permutation_importance(rf, X, y, n_repeats=10, random_state=0,
n_jobs=-1)
fig, ax = plt.subplots()
sorted_idx = result.importances_mean.argsort()
ax.boxplot(result.importances[sorted_idx].T,
vert=False, labels=feature_names[sorted_idx])
ax.set_title("Permutation Importance of each feature")
ax.set_ylabel("Features")
fig.tight_layout()
plt.show()
# %%
# Native support for missing values for gradient boosting
# -------------------------------------------------------
#
# The :class:`ensemble.HistGradientBoostingClassifier`
# and :class:`ensemble.HistGradientBoostingRegressor` now have native
# support for missing values (NaNs). This means that there is no need for
# imputing data when training or predicting.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
import numpy as np
X = np.array([0, 1, 2, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
gbdt = HistGradientBoostingClassifier(min_samples_leaf=1).fit(X, y)
print(gbdt.predict(X))
# %%
# Precomputed sparse nearest neighbors graph
# ------------------------------------------
# Most estimators based on nearest neighbors graphs now accept precomputed
# sparse graphs as input, to reuse the same graph for multiple estimator fits.
# To use this feature in a pipeline, one can use the `memory` parameter, along
# with one of the two new transformers,
# :class:`neighbors.KNeighborsTransformer` and
# :class:`neighbors.RadiusNeighborsTransformer`. The precomputation
# can also be performed by custom estimators to use alternative
# implementations, such as approximate nearest neighbors methods.
# See more details in the :ref:`User Guide <neighbors_transformer>`.
from tempfile import TemporaryDirectory
from sklearn.neighbors import KNeighborsTransformer
from sklearn.manifold import Isomap
from sklearn.pipeline import make_pipeline
X, y = make_classification(random_state=0)
with TemporaryDirectory(prefix="sklearn_cache_") as tmpdir:
estimator = make_pipeline(
KNeighborsTransformer(n_neighbors=10, mode='distance'),
Isomap(n_neighbors=10, metric='precomputed'),
memory=tmpdir)
estimator.fit(X)
# We can decrease the number of neighbors and the graph will not be
# recomputed.
estimator.set_params(isomap__n_neighbors=5)
estimator.fit(X)
# %%
# KNN Based Imputation
# ------------------------------------
# We now support imputation for completing missing values using k-Nearest
# Neighbors.
#
# Each sample's missing values are imputed using the mean value from
# ``n_neighbors`` nearest neighbors found in the training set. Two samples are
# close if the features that neither is missing are close.
# By default, a euclidean distance metric
# that supports missing values,
# :func:`~metrics.nan_euclidean_distances`, is used to find the nearest
# neighbors.
#
# Read more in the :ref:`User Guide <knnimpute>`.
import numpy as np
from sklearn.impute import KNNImputer
X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(X))
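# Working through the arithmetic for the first row (illustration): with the
# default nan_euclidean metric the two nearest neighbors of [1, 2, nan] are
# [3, 4, 3] and [nan, 6, 5], so the missing third feature is imputed as the
# mean of their third features, (3 + 5) / 2 = 4.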
# %%
# Tree pruning
# ------------
#
# It is now possible to prune most tree-based estimators once the trees are
# built. The pruning is based on minimal cost-complexity. Read more in the
# :ref:`User Guide <minimal_cost_complexity_pruning>` for details.
X, y = make_classification(random_state=0)
rf = RandomForestClassifier(random_state=0, ccp_alpha=0).fit(X, y)
print("Average number of nodes without pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
rf = RandomForestClassifier(random_state=0, ccp_alpha=0.05).fit(X, y)
print("Average number of nodes with pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
# %%
# Retrieve dataframes from OpenML
# -------------------------------
# :func:`datasets.fetch_openml` can now return pandas dataframe and thus
# properly handle datasets with heterogeneous data:
from sklearn.datasets import fetch_openml
titanic = fetch_openml('titanic', version=1, as_frame=True)
print(titanic.data.head()[['pclass', 'embarked']])
# %%
# Checking scikit-learn compatibility of an estimator
# ---------------------------------------------------
# Developers can check the compatibility of their scikit-learn compatible
# estimators using :func:`~utils.estimator_checks.check_estimator`. For
# instance, the ``check_estimator(LinearSVC())`` passes.
#
# We now provide a ``pytest`` specific decorator which allows ``pytest``
# to run all checks independently and report the checks that are failing.
#
# .. note::
# This entry was slightly updated in version 0.24, where passing classes
# isn't supported anymore: pass instances instead.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.estimator_checks import parametrize_with_checks
@parametrize_with_checks([LogisticRegression(), DecisionTreeRegressor()])
def test_sklearn_compatible_estimator(estimator, check):
check(estimator)
# %%
# ROC AUC now supports multiclass classification
# ----------------------------------------------
# The :func:`roc_auc_score` function can also be used in multi-class
# classification. Two averaging strategies are currently supported: the
# one-vs-one algorithm computes the average of the pairwise ROC AUC scores, and
# the one-vs-rest algorithm computes the average of the ROC AUC scores for each
# class against all other classes. In both cases, the multiclass ROC AUC scores
# are computed from the probability estimates that a sample belongs to a
# particular class according to the model. The OvO and OvR algorithms support
# weighting uniformly (``average='macro'``) and weighting by the prevalence
# (``average='weighted'``).
#
# Read more in the :ref:`User Guide <roc_metrics>`.
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
X, y = make_classification(n_classes=4, n_informative=16)
clf = SVC(decision_function_shape='ovo', probability=True).fit(X, y)
print(roc_auc_score(y, clf.predict_proba(X), multi_class='ovo'))
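# With n_classes=4 the 'ovo' score above is the unweighted mean over the
# 4 * 3 / 2 = 6 class pairs, each pair scored in both directions and averaged,
# following the Hand & Till (2001) formulation.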
| bsd-3-clause | -890,616,271,821,941,900 | 36.450549 | 79 | 0.720559 | false |
F5Networks/f5-ansible | ansible_collections/f5networks/f5_modules/plugins/modules/bigip_selfip.py | 1 | 28056 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2016, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_selfip
short_description: Manage Self-IPs on a BIG-IP system
description:
- Manage Self-IP addresses on a BIG-IP system.
version_added: "1.0.0"
options:
address:
description:
- The IP addresses for the new self IP. This value is ignored upon update
as addresses themselves cannot be changed after they are created.
- This value is required when creating new self IPs.
type: str
allow_service:
description:
- Configure port lockdown for the self IP. By default, the self IP has a
"default deny" policy. This can be changed to allow TCP and UDP ports,
as well as specific protocols. This list should contain C(protocol):C(port)
values.
type: list
elements: str
name:
description:
- The name of the self IP to create.
- If this parameter is not specified, it defaults to the value supplied
in the C(address) parameter.
type: str
required: True
description:
description:
- Description of the traffic selector.
type: str
netmask:
description:
- The netmask for the self IP. When creating a new self IP, this value
is required.
type: str
state:
description:
- When C(present), guarantees the self IP exists with the provided
attributes.
- When C(absent), removes the self IP from the system.
type: str
choices:
- absent
- present
default: present
traffic_group:
description:
- The traffic group for the self IP addresses in an active-active,
redundant load balancer configuration. When creating a new self IP, if
this value is not specified, the default is C(/Common/traffic-group-local-only).
type: str
vlan:
description:
- The VLAN for the new self IPs. When creating a new self
IP, this value is required.
type: str
route_domain:
description:
- The route domain id of the system. When creating a new self IP, if
this value is not specified, the default value is C(0).
- This value cannot be changed after it is set.
type: int
fw_enforced_policy:
description:
- Specifies an AFM policy to attach to Self IP.
type: str
version_added: "1.1.0"
partition:
description:
- Device partition to manage resources on. You can set different partitions
for self IPs, but the address used may not match any other address used
by a self IP. Thus, self IPs are not isolated by partitions as
other resources on a BIG-IP are.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create Self IP
bigip_selfip:
address: 10.10.10.10
name: self1
netmask: 255.255.255.0
vlan: vlan1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create Self IP with a Route Domain
bigip_selfip:
name: self1
address: 10.10.10.10
netmask: 255.255.255.0
vlan: vlan1
route_domain: 10
allow_service: default
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Delete Self IP
bigip_selfip:
name: self1
state: absent
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Allow management web UI to be accessed on this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- tcp:443
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Allow HTTPS and SSH access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- tcp:443
- tcp:22
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Allow all services access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- all
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Allow only GRE and IGMP protocols access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- gre:0
- igmp:0
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Allow all TCP, but no other protocols access to this Self IP
bigip_selfip:
name: self1
state: absent
allow_service:
- tcp:0
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
allow_service:
description: Services that are allowed via this self IP.
returned: changed
type: list
sample: ['igmp:0','tcp:22','udp:53']
address:
description: The address for the self IP.
returned: changed
type: str
sample: 192.0.2.10
name:
description: The name of the self IP.
returned: created
type: str
sample: self1
netmask:
description: The netmask of the self IP.
returned: changed
type: str
sample: 255.255.255.0
traffic_group:
description: The traffic group of which the self IP is a member.
returned: changed
type: str
sample: traffic-group-local-only
vlan:
description: The VLAN set on the self IP.
returned: changed
type: str
sample: vlan1
fw_enforced_policy:
description: Specifies an AFM policy to be attached to the self IP.
returned: changed
type: str
sample: /Common/afm-blocking-policy
'''
import re
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ipaddress import (
ip_network, ip_interface, ip_address
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, fq_name
)
from ..module_utils.compare import cmp_str_with_none
from ..module_utils.ipaddress import (
is_valid_ip, ipv6_netmask_to_cidr
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'trafficGroup': 'traffic_group',
'allowService': 'allow_service',
'fwEnforcedPolicy': 'fw_enforced_policy',
'fwEnforcedPolicyReference': 'fw_policy_link',
}
api_attributes = [
'trafficGroup',
'allowService',
'vlan',
'address',
'description',
'fwEnforcedPolicy',
'fwEnforcedPolicyReference',
]
updatables = [
'traffic_group',
'allow_service',
'vlan',
'netmask',
'address',
'description',
'fw_enforced_policy',
'fw_policy_link',
]
returnables = [
'traffic_group',
'allow_service',
'vlan',
'route_domain',
'netmask',
'address',
'description',
]
@property
def vlan(self):
if self._values['vlan'] is None:
return None
return fq_name(self.partition, self._values['vlan'])
class ModuleParameters(Parameters):
@property
def address(self):
address = "{0}%{1}/{2}".format(
self.ip, self.route_domain, self.netmask
)
return address
@property
def ip(self):
if self._values['address'] is None:
return None
if is_valid_ip(self._values['address']):
return self._values['address']
else:
raise F5ModuleError(
'The provided address is not a valid IP address'
)
@property
def traffic_group(self):
if self._values['traffic_group'] is None:
return None
return fq_name(self.partition, self._values['traffic_group'])
@property
def route_domain(self):
if self._values['route_domain'] is None:
return None
result = int(self._values['route_domain'])
return result
@property
def netmask(self):
if self._values['netmask'] is None:
return None
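        # Accept either a prefix length (CIDR) or a dotted/IPv6 netmask and normalise it to a prefix length.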
result = -1
try:
result = int(self._values['netmask'])
if 0 < result < 256:
pass
except ValueError:
if is_valid_ip(self._values['netmask']):
addr = ip_address(u'{0}'.format(str(self._values['netmask'])))
if addr.version == 4:
ip = ip_network(u'0.0.0.0/%s' % str(self._values['netmask']))
result = ip.prefixlen
else:
result = ipv6_netmask_to_cidr(self._values['netmask'])
if result < 0:
raise F5ModuleError(
                'The provided netmask {0} is neither in IP nor CIDR format'.format(self._values['netmask'])
)
return result
@property
def allow_service(self):
"""Verifies that a supplied service string has correct format
The string format for port lockdown is PROTOCOL:PORT. This method
will verify that the provided input matches the allowed protocols
and the port ranges before submitting to BIG-IP.
The only allowed exceptions to this rule are the following values
* all
* default
* none
These are special cases that are handled differently in the API.
"all" is set as a string, "default" is set as a one item list, and
"none" removes the key entirely from the REST API.
:raises F5ModuleError:
"""
if self._values['allow_service'] is None:
return None
result = []
allowed_protocols = [
'eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
'l2tp', 'ospf', 'pim', 'tcp', 'udp'
]
special_protocols = [
'all', 'none', 'default'
]
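        # Special keywords replace the whole list; anything else must be a bare protocol or a protocol:port pair.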
for svc in self._values['allow_service']:
if svc in special_protocols:
result = [svc]
break
elif svc in allowed_protocols:
full_service = '{0}:0'.format(svc)
result.append(full_service)
else:
tmp = svc.split(':')
if tmp[0] not in allowed_protocols:
raise F5ModuleError(
"The provided protocol '%s' is invalid" % (tmp[0])
)
try:
port = int(tmp[1])
except Exception:
raise F5ModuleError(
"The provided port '%s' is not a number" % (tmp[1])
)
if port < 0 or port > 65535:
raise F5ModuleError(
"The provided port '{0}' must be between 0 and 65535".format(port)
)
else:
result.append(svc)
result = sorted(list(set(result)))
return result
@property
def fw_enforced_policy(self):
if self._values['fw_enforced_policy'] is None:
return None
if self._values['fw_enforced_policy'] in ['none', '']:
return None
name = self._values['fw_enforced_policy']
return fq_name(self.partition, name)
@property
def fw_policy_link(self):
policy = self.fw_enforced_policy
if policy is None:
return None
tmp = policy.split('/')
link = dict(link='https://localhost/mgmt/tm/security/firewall/policy/~{0}~{1}'.format(tmp[1], tmp[2]))
return link
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
class ApiParameters(Parameters):
@property
def allow_service(self):
if self._values['allow_service'] is None:
return None
if self._values['allow_service'] == 'all':
self._values['allow_service'] = ['all']
return sorted(self._values['allow_service'])
@property
def destination_ip(self):
if self._values['address'] is None:
return None
try:
pattern = r'(?P<rd>%[0-9]+)'
addr = re.sub(pattern, '', self._values['address'])
ip = ip_interface(u'{0}'.format(addr))
return ip.with_prefixlen
except ValueError:
raise F5ModuleError(
"The provided destination is not an IP address"
)
@property
def netmask(self):
ip = ip_interface(self.destination_ip)
return int(ip.network.prefixlen)
@property
def ip(self):
result = ip_interface(self.destination_ip)
return str(result.ip)
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def allow_service(self):
if self._values['allow_service'] is None:
return None
if self._values['allow_service'] == ['all']:
return 'all'
return sorted(self._values['allow_service'])
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def address(self):
return None
@property
def allow_service(self):
"""Returns services formatted for consumption by f5-sdk update
The BIG-IP endpoint for services takes different values depending on
what you want the "allowed services" to be. It can be any of the
following
- a list containing "protocol:port" values
- the string "all"
- a null value, or None
This is a convenience function to massage the values the user has
supplied so that they are formatted in such a way that BIG-IP will
accept them and apply the specified policy.
"""
if self.want.allow_service is None:
return None
result = self.want.allow_service
if result[0] == 'none' and self.have.allow_service is None:
return None
elif self.have.allow_service is None:
return result
elif result[0] == 'all' and self.have.allow_service[0] != 'all':
return ['all']
elif result[0] == 'none':
return []
elif set(self.want.allow_service) != set(self.have.allow_service):
return result
@property
def netmask(self):
if self.want.netmask is None:
return None
ip = self.have.ip
if is_valid_ip(ip):
if self.want.route_domain is not None:
want = "{0}%{1}/{2}".format(ip, self.want.route_domain, self.want.netmask)
have = "{0}%{1}/{2}".format(ip, self.want.route_domain, self.have.netmask)
elif self.have.route_domain is not None:
want = "{0}%{1}/{2}".format(ip, self.have.route_domain, self.want.netmask)
have = "{0}%{1}/{2}".format(ip, self.have.route_domain, self.have.netmask)
else:
want = "{0}/{1}".format(ip, self.want.netmask)
have = "{0}/{1}".format(ip, self.have.netmask)
if want != have:
return want
else:
raise F5ModuleError(
'The provided address/netmask value "{0}" was invalid'.format(self.have.ip)
)
@property
def traffic_group(self):
if self.want.traffic_group != self.have.traffic_group:
return self.want.traffic_group
@property
def description(self):
return cmp_str_with_none(self.want.description, self.have.description)
@property
def fw_policy_link(self):
if self.want.fw_enforced_policy is None:
return None
if self.want.fw_enforced_policy == self.have.fw_enforced_policy:
return None
if self.want.fw_policy_link != self.have.fw_policy_link:
return self.want.fw_policy_link
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = ApiParameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if k in ['netmask']:
changed['address'] = change
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.client, self.module, version)
return result
def present(self):
if self.exists():
changed = self.update()
else:
changed = self.create()
return changed
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the Self IP")
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def create(self):
if self.want.address is None or self.want.netmask is None:
raise F5ModuleError(
'An address and a netmask must be specified'
)
if self.want.vlan is None:
raise F5ModuleError(
'A VLAN name must be specified'
)
if self.want.route_domain is None:
rd = self.read_partition_default_route_domain_from_device()
self.want.update({'route_domain': rd})
if self.want.traffic_group is None:
self.want.update({'traffic_group': '/Common/traffic-group-local-only'})
if self.want.route_domain is None:
self.want.update({'route_domain': 0})
if self.want.allow_service:
if 'all' in self.want.allow_service:
self.want.update(dict(allow_service=['all']))
elif 'none' in self.want.allow_service:
self.want.update(dict(allow_service=[]))
elif 'default' in self.want.allow_service:
self.want.update(dict(allow_service=['default']))
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the Self IP")
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/net/self/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.want.fw_enforced_policy:
payload = dict(
fwEnforcedPolicy=self.want.fw_enforced_policy,
fwEnforcedPolicyReference=self.want.fw_policy_link
)
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.patch(uri, json=payload)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/net/self/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def read_partition_default_route_domain_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/partition/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.partition
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return int(response['defaultRouteDomain'])
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
address=dict(),
allow_service=dict(
elements='str',
type='list',
),
name=dict(required=True),
netmask=dict(),
traffic_group=dict(),
vlan=dict(),
route_domain=dict(type='int'),
description=dict(),
fw_enforced_policy=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | 8,649,483,992,823,860,000 | 29.528836 | 110 | 0.571536 | false |
wreid/Jacunski_Exercises | python_scripts/ppi_network.py | 1 | 2585 | #!/usr/bin/env
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sys import argv
from sys import exit
def main(args):
input_file1 = args[1]
input_file2 = args[2]
inp1 = sanitize_name(input_file1)
inp2 = sanitize_name(input_file2)
Y = nx.Graph()
H = nx.Graph()
load_txt(inp2, Y)
del_dupl(Y)
load_txt(inp1, H)
del_dupl(H)
print '\nYEAST'
MY = largest_component(Y)
print 'HUMAN'
MH = largest_component(H)
plt.xlabel('Degree', fontsize=14, color='black')
plt.ylabel('Frequency', fontsize=14, color='black')
plt.title('PPI of Human and Yeast genomes (BioGrid)', fontsize=16, color='black')
plt.autoscale(enable=True)
n1, bins1, patches1 = plt.hist(nx.degree(MY).values(), \
bins=np.max(nx.degree(MY).values())/25, log=True, histtype='bar', \
color='red', alpha=.8)
n2, bins2, patches2 = plt.hist(nx.degree(MH).values(), \
bins=np.max(nx.degree(MH).values())/25, log=True, histtype='bar', \
color='black', alpha=.3)
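    # Compare the two degree-distribution histograms with a two-sample Kolmogorov-Smirnov test.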
d, p = stats.ks_2samp(n1, n2)
print 'D value of %f' % d
print 'P value of %f' % p
plt.show()
plt.close()
def sanitize_name(filen):
try:
inp = str(filen)
except TypeError:
print 'Input file name must be a string.'
exit()
return inp
def load_txt(fname, graph):
"""
loads text from a file, removes whitespace and loads
each line as two nodes connected by an edge
"""
f = open(fname, 'rb')
txt = f.readlines()
for line in txt:
        line = line.strip()
l = tuple(line.split())
if l[0] != l[1]:
graph.add_edge(*l)
def del_dupl(graph):
"""
    iterates through the graph, removing self-loop edges (edges that connect a node to itself)
"""
for edge in graph.edges():
if edge[0] == edge[1]:
graph.remove_edge(edge[0], edge[1])
def largest_component(graph):
"""
makes a new graph of the largest component in the input graph
"""
# find and output graph
graphs = sorted(list(nx.connected_component_subgraphs(graph, copy=True)), key=len, reverse=True)
out_graph = graphs[0]
# print info about removed
removed = 0
for subgraph in graphs[1:]:
removed = removed + len(subgraph.nodes())
print '%d nodes removed' % removed
print '%d components removed' % len(graphs[1:])
print '%d nodes and %d edges in main component\n' % (len(out_graph.nodes()), \
len(out_graph.edges()))
return out_graph
if __name__ == '__main__':
main(argv) | gpl-2.0 | 3,597,497,723,429,499,000 | 23.865385 | 100 | 0.598839 | false |
opinnmr/stjornbord | register/registration.py | 1 | 2138 | from django.forms import ValidationError
from stjornbord.user.models import validate_username
import datetime
def slugify(name):
import unicodedata as ud
import string
name = unicode(name)
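    # NFD normalisation separates accented characters from their combining marks so the marks can be dropped.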
slug = u''.join(c for c in ud.normalize('NFD', name.lower()) if ud.combining(c) == 0)
slug = slug.replace(u'\xfe', "th")
slug = slug.replace(u'\xe6', "ae")
slug = slug.replace(u'\xf0', "d")
allowed = string.ascii_lowercase + " " + string.digits
return u''.join(c for c in slug if c in allowed)
def suggest_usernames(first_name, last_name):
"""
    Suggest usernames based on the student's name.
F = Given name
M1,M2,.. = Middle name(s)
S = Surname
Lower case = initial
"""
names = slugify("%s %s" % (first_name, last_name)).split(" ")
f_name, m_name, s_name = names[0], names[1:-1], names[-1]
f_init, m_init, s_init = f_name[0], ''.join([c[0] for c in m_name]), s_name[0]
suggestions = []
def add_suggestion(username):
# Append last two digits of the year to username suggestion. This
# is to make it easier to maintain unique usernames
username = "%s%s" % (username, datetime.date.today().strftime("%y"))
if username not in suggestions:
try:
# See if this is a valid username
validate_username(username)
except ValidationError:
# Username invalid
pass
else:
# Username valid, append to suggestion list
suggestions.append(username)
# F
add_suggestion( f_name )
# FS
add_suggestion( f_name + s_name )
# Fm, Fs, Fms
add_suggestion( f_name + m_init )
add_suggestion( f_name + m_init + s_init )
add_suggestion( f_name + s_init )
# FmS
add_suggestion( f_name + m_init + s_name )
# fms
add_suggestion( f_init + m_init + s_init )
# FM
for n in m_name:
add_suggestion( f_name + n )
# MS
for n in m_name:
add_suggestion( n + s_name )
# S
add_suggestion( s_name )
return suggestions
| gpl-3.0 | 2,208,290,807,513,363,200 | 25.395062 | 89 | 0.572965 | false |
comses/catalog | catalog/core/tests/test_views.py | 1 | 10103 | import json
import logging
from unittest import mock
from unittest.mock import patch
from django.contrib.auth.models import User
from haystack.query import SearchQuerySet
from citation.models import Publication
from .common import BaseTest
logger = logging.getLogger(__name__)
MAX_EXAMPLES = 30
CONTACT_US_URL = 'core:contact_us'
DASHBOARD_URL = 'core:dashboard'
HAYSTACK_SEARCH_URL = 'core:haystack_search'
PUBLICATIONS_URL = 'citation:publications'
PUBLICATION_DETAIL_URL = 'citation:publication_detail'
USER_PROFILE_URL = 'core:user_profile'
WORKFLOW_URL = 'core:curator_workflow'
HOME_URL = 'core:public-home'
class UrlTest(BaseTest):
TEST_URLS = (CONTACT_US_URL, DASHBOARD_URL, HAYSTACK_SEARCH_URL, PUBLICATIONS_URL, USER_PROFILE_URL, WORKFLOW_URL)
def test_urls(self):
self.login()
for url in UrlTest.TEST_URLS:
response = self.get(url)
self.assertTrue(200, response.status_code)
class AuthTest(BaseTest):
def test_login(self):
response = self.get(self.login_url)
self.assertTrue(200, response.status_code)
def test_login_with_bad_credentials(self):
response = self.post(
self.login_url, {'username': 'wrong_username', 'password': 'temporary'})
self.assertTrue(200, response.status_code)
self.assertTrue(b'Please enter a correct username and password.' in response.content)
def test_login_with_good_credentials(self):
response = self.post(self.login_url, {'username': self.default_username, 'password': self.default_password})
self.assertTrue(200, response.status_code)
self.assertTrue(self.reverse(HOME_URL) in response['Location'])
def test_login_with_inactive_user(self):
self.user.is_active = False
self.user.save()
response = self.post(self.login_url, {'username': self.default_username, 'password': self.default_password})
self.assertTrue(200, response.status_code)
def test_logout(self):
response = self.get(self.logout_url)
self.assertTrue(302, response.status_code)
class ProfileViewTest(BaseTest):
def test_profile_view(self):
self.without_login_and_with_login_test(USER_PROFILE_URL)
def test_profile_update(self):
first_name, last_name = 'Egg', 'Timer'
url = self.reverse(USER_PROFILE_URL, query_parameters={'format': 'json'})
self.login()
old_email = self.user.email
response = self.post(url, {'first_name': first_name,
'last_name': last_name,
'username': self.user.username,
})
self.assertEqual(200, response.status_code)
user = User.objects.get(username=self.user.username)
# check for updated values
self.assertEqual(user.first_name, first_name)
self.assertEqual(user.last_name, last_name)
# ensure email has not been changed
self.assertEqual(user.email, old_email)
def test_profile_invalid_email_update(self):
first_name, last_name = 'Egg', 'Timer'
url = self.reverse(USER_PROFILE_URL, query_parameters={'format': 'json'})
self.login()
response = self.post(url, {'first_name': first_name,
'last_name': last_name,
'username': self.user.username,
'email': "[email protected]"
})
# Updating email should return status code 400 - but for now we are ignoring it
self.assertEqual(200, response.status_code)
def test_profile_invalid_update(self):
first_name, last_name = 'Egg', 'Timer'
username = ' sldfkj kljsf # A//?'
url = self.reverse(USER_PROFILE_URL, query_parameters={'format': 'json'})
self.login()
response = self.post(url, {'first_name': first_name, 'last_name': last_name,
'username': username})
self.assertTrue(400, response.status_code)
class IndexViewTest(BaseTest):
def test_index_view(self):
self.without_login_and_with_login_test(self.index_url, before_status=200)
class DashboardViewTest(BaseTest):
def test_dashboard_view(self):
self.without_login_and_with_login_test(DASHBOARD_URL)
class PublicationsViewTest(BaseTest):
def test_publications_view(self):
self.without_login_and_with_login_test(PUBLICATIONS_URL)
def test_publication_view_with_query_parameter(self):
self.login()
url = self.reverse(PUBLICATIONS_URL)
response = self.get(url + "?page=-1")
self.assertEqual(404, response.status_code)
class PublicationDetailViewTest(BaseTest):
def test_canonical_publication_detail_view(self):
journal_title = 'Econometrica'
container = self.create_container(name=journal_title)
container.save()
publication_title = 'A very model model'
p = self.create_publication(title=publication_title, added_by=self.user, container=container)
p.save()
self.logout()
url = self.reverse(PUBLICATION_DETAIL_URL, kwargs={'pk': 999999})
self.without_login_and_with_login_test(url, after_status=404)
url = self.reverse(PUBLICATION_DETAIL_URL, kwargs={'pk': p.pk, 'slug': p.slug})
response = self.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, journal_title)
self.assertContains(response, publication_title)
self.logout()
url = self.reverse(PUBLICATION_DETAIL_URL,
query_parameters={'format': 'json'},
kwargs={'pk': p.pk})
self.without_login_and_with_login_test(url, after_status=302)
self.logout()
url = self.reverse(PUBLICATION_DETAIL_URL, query_parameters={
'format': 'json'}, kwargs={'pk': 999999})
self.without_login_and_with_login_test(url, after_status=404)
@patch('citation.views.PublicationSerializer')
def test_publication_detail_save_uses_publication_serializer(self, publication_serializer):
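        # Stub out the serializer so the view's save path runs without touching validation or the database.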
class MockPublicationSerializer:
def is_valid(self):
return True
def save(self, user=None):
return Publication()
@property
def data(self):
return {}
publication_serializer.return_value = MockPublicationSerializer()
container = self.create_container(name='Econometrica')
container.save()
p = self.create_publication(title='Akjhdfjk kjjd', added_by=self.user, container=container)
p.save()
self.login()
url = p.get_absolute_url()
response = self.put('{}?format=json'.format(url), json.dumps({}), content_type="application/json")
self.assertEqual(200, response.status_code)
class SearchViewTest(BaseTest):
def test_search_with_no_query_parameters(self):
self.without_login_and_with_login_test(
self.reverse(HAYSTACK_SEARCH_URL))
def test_search_with_all_query_parameters(self):
query_parameters = {
'q': '',
'publication_start_date': '1/1/2014',
'publication_end_date': '1/1/2015',
'contact_email': True,
'status': Publication.Status.REVIEWED,
'journal': 'ECOLOGICAL MODELLING',
'tags': 'Agriculture',
'authors': 'Guiller',
'assigned_curator': 'yhsieh22',
'flagged': False,
'is_archived': True
}
self.logout()
print("Query Parameter", query_parameters)
url = self.reverse(
HAYSTACK_SEARCH_URL, query_parameters=query_parameters)
self.without_login_and_with_login_test(url)
# Test to verify if it returns same output list or not
p = SearchQuerySet().filter(is_primary=True, date_published__gte='2014-01-01T00:00:00Z',
date_published__lte='2015-01-01T00:00:00Z',
status=Publication.Status.REVIEWED, container__name='ECOLOGICAL MODELLING',
authors='Guiller', assigned_curator='yhsieh22', flagged=False,
is_archived=True).count()
self.login()
url = self.reverse(HAYSTACK_SEARCH_URL)
response = self.client.get(
url + "?q=&publication_start_date=1%2F1%2F2014&publication_end_date=1%2F1%2F2015&status=&journal=\
ECOLOGICAL+MODELLING&tags=Agriculture&authors=Guiller&assigned_curator=yhsieh22&flagged=False&is_archived=True")
object_count = response.context['object_list']
self.assertEqual(200, response.status_code)
if p < 25 or len(object_count) < 25:
self.assertEquals(p, len(object_count))
def test_search_with_few_query_parameters(self):
query_parameters = {
'q': '',
'publication_start_date': '1/1/2014',
'publication_end_date': '1/1/2015',
'contact_email': 'on',
'status': Publication.Status.UNREVIEWED
}
url = self.reverse(
HAYSTACK_SEARCH_URL, query_parameters=query_parameters)
self.without_login_and_with_login_test(url)
class ContactViewTest(BaseTest):
def test_contact_view(self):
self.without_login_and_with_login_test(
self.reverse(CONTACT_US_URL), before_status=200)
@patch('catalog.core.views.ContactFormSerializer.save', return_value=None)
@patch('catalog.core.views.ContactFormSerializer.is_valid', return_value=True)
@patch('catalog.core.views.ContactFormSerializer.data', new_callable=mock.PropertyMock)
def test_contact_info_route_is_validated(self, data, is_valid, save):
data.return_value = {}
url = self.reverse(CONTACT_US_URL, query_parameters={'format': 'json'})
self.post(url, {})
data.assert_any_call()
is_valid.assert_any_call()
save.assert_any_call()
| gpl-3.0 | 7,917,445,301,192,095,000 | 39.09127 | 124 | 0.623082 | false |
california-civic-data-coalition/django-calaccess-processed-data | calaccess_processed_elections/proxies/opencivicdata/__init__.py | 1 | 1911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
from .core import (
OCDDivisionProxy,
OCDMembershipProxy,
OCDOrganizationProxy,
OCDOrganizationIdentifierProxy,
OCDOrganizationNameProxy,
OCDJurisdictionProxy,
OCDPersonProxy,
OCDPersonIdentifierProxy,
OCDPersonNameProxy,
OCDPostProxy
)
from .elections import (
OCDBallotMeasureContestProxy,
OCDBallotMeasureContestIdentifierProxy,
OCDBallotMeasureContestOptionProxy,
OCDBallotMeasureContestSourceProxy,
OCDCandidateContestProxy,
OCDCandidateContestPostProxy,
OCDCandidateContestSourceProxy,
OCDCandidacyProxy,
OCDCandidacySourceProxy,
OCDElectionProxy,
OCDElectionIdentifierProxy,
OCDElectionSourceProxy,
OCDPartyProxy,
OCDRetentionContestProxy,
OCDRetentionContestIdentifierProxy,
OCDRetentionContestOptionProxy,
OCDRetentionContestSourceProxy,
)
__all__ = (
'RawFilerToFilerTypeCdManager',
'OCDDivisionProxy',
'OCDJurisdictionProxy',
'OCDMembershipProxy',
'OCDOrganizationProxy',
'OCDOrganizationIdentifierProxy',
'OCDOrganizationNameProxy',
'OCDPersonProxy',
'OCDPersonIdentifierProxy',
'OCDPersonNameProxy',
'OCDPostProxy',
"OCDBallotMeasureContestProxy",
"OCDBallotMeasureContestIdentifierProxy",
"OCDBallotMeasureContestOptionProxy",
"OCDBallotMeasureContestSourceProxy",
"OCDCandidateContestProxy",
"OCDCandidateContestPostProxy",
"OCDCandidateContestSourceProxy",
"OCDCandidacyProxy",
"OCDCandidacySourceProxy",
"OCDElectionProxy",
"OCDElectionIdentifierProxy",
"OCDElectionSourceProxy",
"OCDPartyProxy",
"OCDRetentionContestProxy",
"OCDRetentionContestIdentifierProxy",
"OCDRetentionContestOptionProxy",
"OCDRetentionContestSourceProxy"
)
| mit | -7,379,257,993,281,909,000 | 27.522388 | 86 | 0.761905 | false |
tisnik/fabric8-analytics-common | integration-tests/features/steps/authorization.py | 1 | 3810 | """Test steps that are related to authorization tokens for the server API and jobs API."""
import os
import datetime
import json
from behave import when, then
import jwt
from jwt.contrib.algorithms.pycrypto import RSAAlgorithm
DEFAULT_AUTHORIZATION_TOKEN_FILENAME = "private_key.pem"
# try to register the SHA256 algorithm, but because the algorithm might be
# already registered (which is ok), don't fail in such situation
try:
jwt.register_algorithm('RS256', RSAAlgorithm(RSAAlgorithm.SHA256))
except ValueError as e:
print("Warning: the following exception occured during SHA256 algorithm " +
"initialization: {msg}".format(msg=e))
@then('I should get the proper authorization token')
def is_proper_authorization_token_for_server_api(context):
"""Check if the test has any authorization token for server API."""
assert context.token is not None
# TODO: check the token content
@then('I should get the proper job API authorization token')
def is_proper_authorization_token_for_jobs_api(context):
"""Check if the test has any authorization token for the Jobs API."""
assert context.jobs_api_token is not None
# TODO: check the token content
@when('I acquire the authorization token')
def acquire_authorization_token(context):
"""Acquire the authorization token.
The token is read from the environment variable or is to be generated from
the given .pem file (private key).
Alternatively the REFRESH_TOKEN (offline token) can be used to get
the temporary access token - it should be done just once in environment.py.
"""
recommender_token = os.environ.get("RECOMMENDER_API_TOKEN")
# log.info ("TOKEN: {}\n\n".format(recommender_token))
# if access_token has been acquired via refresh/offline token, let's use it
# (and don't call AUTH API several times - it is not encouraged)
if context.access_token is not None:
context.token = context.access_token
elif recommender_token is not None:
context.token = recommender_token
else:
generate_authorization_token(context, DEFAULT_AUTHORIZATION_TOKEN_FILENAME)
@when('I generate authorization token from the private key {private_key}')
def generate_authorization_token(context, private_key):
"""Generate authorization token from the private key."""
expiry = datetime.datetime.utcnow() + datetime.timedelta(days=90)
userid = "testuser"
path_to_private_key = 'data/{private_key}'.format(private_key=private_key)
# initial value
context.token = None
with open(path_to_private_key) as fin:
private_key = fin.read()
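    # Build the JWT claim set; the token below is signed with RS256 and expires after 90 days.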
payload = {
'exp': expiry,
'iat': datetime.datetime.utcnow(),
'sub': userid,
'username': userid
}
token = jwt.encode(payload, key=private_key, algorithm='RS256')
decoded = token.decode('utf-8')
# print(decoded)
context.token = decoded
@then("I should get API token")
def check_api_token(context):
"""Check the API token existence."""
try:
j = json.loads(context.kerb_request)
except ValueError:
print(context.kerb_request)
raise
assert j["token"]
@when("I acquire the use_key for 3scale")
def acquire_user_key_for_3scale(context):
"""Acquire the user_key for 3scale.
The user_key is read from the environment variable
"""
three_scale_preview_user_key = os.environ.get("THREE_SCALE_PREVIEW_USER_KEY")
context.three_scale_preview_user_key = three_scale_preview_user_key
@then('I should get the proper user_key')
def is_proper_user_key_for_three_scale_preview(context):
"""Check if the test has user_key for three scale API."""
assert context.three_scale_preview_user_key is not None
# TODO: check the key content
| apache-2.0 | -1,218,336,546,389,011,700 | 33.954128 | 90 | 0.698163 | false |
qsnake/h5py | h5py/tests/test_h5p.py | 1 | 3238 | #+
#
# This file is part of h5py, a low-level Python interface to the HDF5 library.
#
# Copyright (C) 2008 Andrew Collette
# http://h5py.alfven.org
# License: BSD (See LICENSE.txt for full license)
#
# $Date$
#
#-
import unittest
import numpy
from h5py import *
HDFNAME = 'attributes.hdf5'
TYPES = {h5p.FILE_CREATE: h5p.PropFCID,
h5p.FILE_ACCESS: h5p.PropFAID,
h5p.DATASET_CREATE: h5p.PropDCID,
h5p.DATASET_XFER: h5p.PropDXID }
class TestH5P(unittest.TestCase):
def test_create_get_class(self):
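        # Each property list class should round-trip: create() returns the proxy type and get_class() matches it.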
for typecode, cls in TYPES.iteritems():
instance = h5p.create(typecode)
self.assertEqual(type(instance), cls)
self.assert_(instance.get_class().equal(typecode))
class TestFCID(unittest.TestCase):
def setUp(self):
self.p = h5p.create(h5p.FILE_CREATE)
def test_version(self):
vers = self.p.get_version()
self.assertEqual(len(vers), 4)
def test_userblock(self):
for size in (512,1024,2048):
self.p.set_userblock(size)
self.assertEqual(self.p.get_userblock(), size)
def test_sizes(self):
sizes = [(2,4), (8,16)]
for a, s in sizes:
self.p.set_sizes(a,s)
self.assertEqual(self.p.get_sizes(), (a,s))
class TestFAID(unittest.TestCase):
CLOSE_DEGREES = (h5f.CLOSE_WEAK,
h5f.CLOSE_SEMI,
h5f.CLOSE_STRONG,
h5f.CLOSE_DEFAULT)
def setUp(self):
self.p = h5p.create(h5p.FILE_ACCESS)
def test_fclose_degree(self):
for deg in self.CLOSE_DEGREES:
self.p.set_fclose_degree(deg)
self.assertEqual(self.p.get_fclose_degree(), deg)
def test_fapl_core(self):
settings = (2*1024*1024, 1)
self.p.set_fapl_core(*settings)
self.assertEqual(self.p.get_fapl_core(), settings)
def test_sieve(self):
self.p.get_sieve_buf_size()
self.p.set_sieve_buf_size(128*1024)
self.assertEqual(self.p.get_sieve_buf_size(), 128*1024)
class TestDCID(unittest.TestCase):
LAYOUTS = (h5d.COMPACT,
h5d.CONTIGUOUS,
h5d.CHUNKED)
CHUNKSIZES = ((1,), (4,4), (16,32,4))
def setUp(self):
self.p = h5p.create(h5p.DATASET_CREATE)
def test_layout(self):
for l in self.LAYOUTS:
self.p.set_layout(l)
self.assertEqual(self.p.get_layout(), l)
def test_chunk(self):
for c in self.CHUNKSIZES:
self.p.set_chunk(c)
self.assertEqual(self.p.get_chunk(), c)
def test_fill_value(self):
vals = [ numpy.array(1.0), numpy.array(2.0), numpy.array(4, dtype='=u8'),
numpy.array( (1,2,3.5+6j), dtype=[('a','<i4'),('b','=f8'),('c','<c16')] )]
self.assertEqual(self.p.fill_value_defined(), h5d.FILL_VALUE_DEFAULT)
for val in vals:
self.p.set_fill_value(val)
holder = numpy.ndarray(val.shape, val.dtype)
self.p.get_fill_value(holder)
self.assertEqual(holder, val)
self.assertEqual(self.p.fill_value_defined(), h5d.FILL_VALUE_USER_DEFINED)
class TestDXID(unittest.TestCase):
pass
| bsd-3-clause | -5,724,678,001,424,972,000 | 25.983333 | 91 | 0.582458 | false |
PSU-OIT-ARC/django-local-settings | setup.py | 1 | 2003 | import sys
from setuptools import find_packages, setup
py_version = sys.version_info[:2]
py_version_dotted = '{0.major}.{0.minor}'.format(sys.version_info)
supported_py_versions = ('2.7', '3.3', '3.4', '3.5', '3.6')
if py_version_dotted not in supported_py_versions:
sys.stderr.write('WARNING: django-local-settings does not officially support Python ')
sys.stderr.write(py_version_dotted)
sys.stderr.write('\n')
with open('VERSION') as version_fp:
VERSION = version_fp.read().strip()
with open('README.md') as readme_fp:
long_description = readme_fp.read()
install_requires = [
'six',
]
if py_version < (3, 0):
install_requires.append('configparser')
# NOTE: Keep this Django version up to date with the latest Django
# release that works for the versions of Python we support.
# This is used to get up and running quickly; tox is used to test
# all supported Python/Django version combos.
if py_version == (3, 3):
    django_spec = 'django>=1.8,<1.9'
else:
    django_spec = 'django>=1.10,<1.11'
setup(
name='django-local-settings',
version=VERSION,
author='Wyatt Baldwin',
author_email='[email protected]',
url='https://github.com/PSU-OIT-ARC/django-local-settings',
description='A system for dealing with local settings in Django projects',
long_description=long_description,
packages=find_packages(),
install_requires=install_requires,
extras_require={
'dev': [
'coverage>=4',
django_spec,
'flake8',
'tox>=2.6.0',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
] + [
        'Programming Language :: Python :: {v}'.format(v=v) for v in supported_py_versions
],
entry_points="""
[console_scripts]
make-local-settings = local_settings:make_local_settings
""",
)
| mit | 6,929,469,746,565,898,000 | 26.438356 | 90 | 0.630055 | false |
rbrnk/phpffl-gamestats | update_player_ids.py | 1 | 3644 | import sys
import MySQLdb
import MySQLdb.cursors
import json
import nflgame
import os
import config as c
db = MySQLdb.connect(host=c.dbhost, user=c.dbuser, passwd=c.dbpass, db=c.dbname, cursorclass=MySQLdb.cursors.DictCursor)
# File to save a lookup table to be used for nflgame -> cbs player id lookup
lookupfile = os.path.dirname(os.path.abspath(__file__))+"/playerids.txt"
nflids = dict()
new_adds = 0
def main():
global nflids
cur = db.cursor()
nflids = load_nflids()
nomatch = 0
done = 0
players = nflgame.players
active = " and active = 'yes'"
#active = ""
for p in players:
first = players[p].first_name.replace(".","")
last = players[p].last_name
pos = players[p].position
team = players[p].team
status = players[p].status
# print "%s %s" % (first, last)
# if status != "ACT":
# continue
if nflids.get(str(p)) is not None:
done += 1
continue
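        # First try to match on first and last name alone; position and team are added below to resolve duplicates.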
cur.execute("Select ID from players where replace(firstname,'.','') like '%%%s%%' and lastname = '%s' %s" % (first.replace("'","''"),last.replace("'","''"),active))
query = ("Select ID from players where replace(firstname,'.','') like '%%%s%%' and lastname = '%s' %s" % (first.replace("'","''"),last.replace("'","''"),active))
if cur.rowcount == 0: # Not found in phpffl players table
print "No match in phpffl using: ( %s+%s ) nfl_id: %s" % (first, last, p)
nomatch += 1
continue
elif cur.rowcount == 1: # Found using firstname and lastname
row = cur.fetchone()
AddID(p, row["ID"])
else: # Found too many, add position to query
cur.execute("Select ID from players where replace(firstname,'.','') like '%%%s%%' and lastname = '%s' and positionID = '%s' %s" % (first.replace("'","''"),last.replace("'","''"),pos,active))
if cur.rowcount == 0: # Not found after adding pos in phpffl players table, must be a duplicate
print 'Duplicate in phpffl using: ( %s+%s ) nfl_id: %s' % (first,last,p)
continue
elif cur.rowcount == 1: # Found using firstname, lastname, and pos
row = cur.fetchone()
AddID(p, row["ID"])
else: # Found too many, add team to query
if pos == None:
cur.execute("Select ID from players where replace(firstname,'.','') like '%%%s%%' and lastname = '%s' and teamID = '%s' %s" % (first.replace("'","''"),last.replace("'","''"),team,active))
else:
cur.execute("Select ID from players where replace(firstname,'.','') like '%%%s%%' and lastname = '%s' and positionID = '%s' and teamID = '%s' %s" % (first.replace("'","''"),last.replace("'","''"),pos,team,active))
if cur.rowcount == 1: # Found using firstname, lastname, pos, and team
row = cur.fetchone()
AddID(p, row["ID"])
else: # Not found and is and is a duplicate
print 'Duplicate in phpffl using: ( %s+%s+POS:"%s" ) nfl_id: %s' % (first,last,pos,p)
continue
save_nflids()
print
print "-----------------------------------"
print "Newly added: %s" % (str(new_adds))
print "Already exist: %s" % str(done)
print "No match in phpffl: %s" % str(nomatch)
def load_nflids():
ids = dict()
if not os.path.isfile(lookupfile):
open(lookupfile, 'a').close()
with open(lookupfile) as f:
for line in f:
nfl_id,phpffl_id = line.strip().split(",")
ids[nfl_id] = phpffl_id
return ids
def save_nflids():
with open(lookupfile,'w') as f:
for n in nflids:
f.write(n+','+nflids[n]+'\n')
def AddID(nfl_id, phpffl_id):
global new_adds
nflids[nfl_id] = phpffl_id
new_adds += 1
main()
| mit | -5,664,523,390,465,720,000 | 36.958333 | 223 | 0.587267 | false |
kichkasch/gxpphonetools | gxpphonewatcher/SIPWatcher.py | 1 | 1871 | #! /usr/bin/python
#
# SIP Watcher
#
# Michael Pilgermann ([email protected])
# Version 0.1 (2008-10-31)
#
#
#import sys
import pjsua as pj
import WatcherApplet
import threading
import gtk
import thread
# Globals
#current_call = None
acc = None
acc_cb = None
#gui = None
# Callback to receive events from account
class MyAccountCallback(pj.AccountCallback):
global acc
gui = None
sem = None
def __init__(self, account, gui):
pj.AccountCallback.__init__(self, account)
self.gui = gui
# print self.__dict__
def wait(self):
self.sem = threading.Semaphore(0)
self.sem.acquire()
def on_incoming_call(self, call):
print "Incoming call from ", call.info().remote_uri
self.gui.showEvent("Test", 10)
if self.sem:
self.sem.release()
def on_reg_state(self):
print "Status of account changed to ", acc.info().reg_reason
def startupSipBackend(gui):
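    # Initialise pjsua, register the SIP account and block on the callback semaphore until the GUI shuts down.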
global acc
global acc_cb
try:
lib = pj.Lib()
lib.init(log_cfg = None)
transport = lib.create_transport(pj.TransportType.UDP, pj.TransportConfig(5060))
lib.start()
acc_cfg = pj.AccountConfig("030.sip.arcor.de", "03053140698", "67631411")
acc = lib.create_account(acc_cfg)
acc_cb = MyAccountCallback(acc, gui)
acc.set_callback(acc_cb)
acc.set_basic_status(1)
while gui.up:
acc_cb.wait()
acc.delete()
lib.destroy()
except pj.Error, err:
print 'Error creating account:', err
def shutdownSipBackend():
acc_cb.sem.release()
gtk.gdk.threads_init()
gui = WatcherApplet.Gui()
thread.start_new_thread(startupSipBackend, (gui,))
gtk.gdk.threads_enter()
gui.initGui()
gtk.gdk.threads_leave()
shutdownSipBackend()
| gpl-3.0 | -8,859,535,708,912,758,000 | 22.098765 | 88 | 0.609834 | false |
jabesq/home-assistant | homeassistant/components/notion/__init__.py | 1 | 9994 | """Support for Notion."""
import asyncio
import logging
from aionotion import async_get_client
from aionotion.errors import InvalidCredentialsError, NotionError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client, config_validation as cv, device_registry as dr)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from .config_flow import configured_instances
from .const import (
DATA_CLIENT, DEFAULT_SCAN_INTERVAL, DOMAIN, TOPIC_DATA_UPDATE)
_LOGGER = logging.getLogger(__name__)
ATTR_SYSTEM_MODE = 'system_mode'
ATTR_SYSTEM_NAME = 'system_name'
DATA_LISTENER = 'listener'
DEFAULT_ATTRIBUTION = 'Data provided by Notion'
SENSOR_BATTERY = 'low_battery'
SENSOR_DOOR = 'door'
SENSOR_GARAGE_DOOR = 'garage_door'
SENSOR_LEAK = 'leak'
SENSOR_MISSING = 'missing'
SENSOR_SAFE = 'safe'
SENSOR_SLIDING = 'sliding'
SENSOR_SMOKE_CO = 'alarm'
SENSOR_TEMPERATURE = 'temperature'
SENSOR_WINDOW_HINGED_HORIZONTAL = 'window_hinged_horizontal'
SENSOR_WINDOW_HINGED_VERTICAL = 'window_hinged_vertical'
BINARY_SENSOR_TYPES = {
SENSOR_BATTERY: ('Low Battery', 'battery'),
SENSOR_DOOR: ('Door', 'door'),
SENSOR_GARAGE_DOOR: ('Garage Door', 'garage_door'),
SENSOR_LEAK: ('Leak Detector', 'moisture'),
SENSOR_MISSING: ('Missing', 'connectivity'),
SENSOR_SAFE: ('Safe', 'door'),
SENSOR_SLIDING: ('Sliding Door/Window', 'door'),
SENSOR_SMOKE_CO: ('Smoke/Carbon Monoxide Detector', 'smoke'),
SENSOR_WINDOW_HINGED_HORIZONTAL: ('Hinged Window', 'window'),
SENSOR_WINDOW_HINGED_VERTICAL: ('Hinged Window', 'window'),
}
SENSOR_TYPES = {
    SENSOR_TEMPERATURE: ('Temperature', 'temperature', '°C'),
}
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the Notion component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
hass.data[DOMAIN][DATA_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
if conf[CONF_USERNAME] in configured_instances(hass):
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': SOURCE_IMPORT},
data={
CONF_USERNAME: conf[CONF_USERNAME],
CONF_PASSWORD: conf[CONF_PASSWORD]
}))
return True
async def async_setup_entry(hass, config_entry):
"""Set up Notion as a config entry."""
session = aiohttp_client.async_get_clientsession(hass)
try:
client = await async_get_client(
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
session)
except InvalidCredentialsError:
_LOGGER.error('Invalid username and/or password')
return False
except NotionError as err:
_LOGGER.error('Config entry failed: %s', err)
raise ConfigEntryNotReady
notion = Notion(hass, client, config_entry.entry_id)
await notion.async_update()
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = notion
for component in ('binary_sensor', 'sensor'):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
config_entry, component))
async def refresh(event_time):
"""Refresh Notion sensor data."""
_LOGGER.debug('Refreshing Notion sensor data')
await notion.async_update()
async_dispatcher_send(hass, TOPIC_DATA_UPDATE)
hass.data[DOMAIN][DATA_LISTENER][
config_entry.entry_id] = async_track_time_interval(
hass,
refresh,
DEFAULT_SCAN_INTERVAL)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a Notion config entry."""
hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
cancel = hass.data[DOMAIN][DATA_LISTENER].pop(config_entry.entry_id)
cancel()
for component in ('binary_sensor', 'sensor'):
await hass.config_entries.async_forward_entry_unload(
config_entry, component)
return True
async def register_new_bridge(hass, bridge, config_entry_id):
"""Register a new bridge."""
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry_id,
identifiers={
(DOMAIN, bridge['hardware_id'])
},
manufacturer='Silicon Labs',
model=bridge['hardware_revision'],
name=bridge['name'] or bridge['id'],
sw_version=bridge['firmware_version']['wifi']
)
class Notion:
"""Define a class to handle the Notion API."""
def __init__(self, hass, client, config_entry_id):
"""Initialize."""
self._client = client
self._config_entry_id = config_entry_id
self._hass = hass
self.bridges = {}
self.sensors = {}
self.tasks = {}
async def async_update(self):
"""Get the latest Notion data."""
tasks = {
'bridges': self._client.bridge.async_all(),
'sensors': self._client.sensor.async_all(),
'tasks': self._client.task.async_all(),
}
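        # Run the three API calls concurrently; exceptions come back as results instead of being raised.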
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for attr, result in zip(tasks, results):
if isinstance(result, NotionError):
_LOGGER.error(
'There was an error while updating %s: %s', attr, result)
continue
holding_pen = getattr(self, attr)
for item in result:
if attr == 'bridges' and item['id'] not in holding_pen:
# If a new bridge is discovered, register it:
self._hass.async_create_task(
register_new_bridge(
self._hass, item, self._config_entry_id))
holding_pen[item['id']] = item
class NotionEntity(Entity):
"""Define a base Notion entity."""
def __init__(
self,
notion,
task_id,
sensor_id,
bridge_id,
system_id,
name,
device_class):
"""Initialize the entity."""
self._async_unsub_dispatcher_connect = None
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._bridge_id = bridge_id
self._device_class = device_class
self._name = name
self._notion = notion
self._sensor_id = sensor_id
self._state = None
self._system_id = system_id
self._task_id = task_id
@property
def available(self):
"""Return True if entity is available."""
return self._task_id in self._notion.tasks
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attrs
@property
def device_info(self):
"""Return device registry information for this entity."""
bridge = self._notion.bridges[self._bridge_id]
sensor = self._notion.sensors[self._sensor_id]
return {
'identifiers': {
(DOMAIN, sensor['hardware_id'])
},
'manufacturer': 'Silicon Labs',
'model': sensor['hardware_revision'],
'name': sensor['name'],
'sw_version': sensor['firmware_version'],
'via_device': (DOMAIN, bridge['hardware_id'])
}
@property
def name(self):
"""Return the name of the sensor."""
return '{0}: {1}'.format(
self._notion.sensors[self._sensor_id]['name'], self._name)
@property
def should_poll(self):
"""Disable entity polling."""
return False
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
task = self._notion.tasks[self._task_id]
return '{0}_{1}'.format(self._sensor_id, task['task_type'])
async def _update_bridge_id(self):
"""Update the entity's bridge ID if it has changed.
Sensors can move to other bridges based on signal strength, etc.
"""
sensor = self._notion.sensors[self._sensor_id]
if self._bridge_id == sensor['bridge']['id']:
return
self._bridge_id = sensor['bridge']['id']
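        # The sensor reports a different bridge, so re-link this entity's device to the new bridge in the registry.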
device_registry = await dr.async_get_registry(self.hass)
bridge = self._notion.bridges[self._bridge_id]
bridge_device = device_registry.async_get_device(
{DOMAIN: bridge['hardware_id']}, set())
this_device = device_registry.async_get_device(
{DOMAIN: sensor['hardware_id']})
device_registry.async_update_device(
this_device.id, via_device_id=bridge_device.id)
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the entity."""
self.hass.async_create_task(self._update_bridge_id())
self.async_schedule_update_ha_state(True)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_DATA_UPDATE, update)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
| apache-2.0 | 4,869,603,951,443,741,000 | 31.444805 | 79 | 0.61373 | false |
dylanseago/CommunityFund | server/communityfund/apps/home/models.py | 1 | 2355 | from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from django.core.urlresolvers import reverse
from django.db.models import Sum
from communityfund.apps.home.templatetags.custom_tags import currency_filter
class DatedModel(models.Model):
created_on = models.DateTimeField(auto_now_add=True)
modified_on = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Community(DatedModel):
creator = models.ForeignKey(User, related_name="communities_created")
name = models.TextField(max_length=100)
description = models.TextField(max_length=1000)
subscribers = models.ManyToManyField(User, related_name="subscriptions")
def get_absolute_url(self):
return reverse('community', args=[str(self.id)])
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Communities"
class Project(DatedModel):
initiator = models.ForeignKey(User, related_name="projects_created")
community = models.ForeignKey(Community, related_name="projects")
name = models.TextField(max_length=100)
summary = models.TextField(max_length=250)
about = models.TextField(max_length=20000)
goal = models.PositiveIntegerField()
deadline = models.DateTimeField()
@property
def num_funded(self):
return self.fundings.count()
@property
def amount_funded(self):
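        # aggregate() returns None when there are no fundings yet, so fall back to 0.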
amount_funded = self.fundings.all().aggregate(Sum('amount'))['amount__sum']
return amount_funded if amount_funded else 0
@property
def percent_funded(self):
return round((self.amount_funded / self.goal) * 100)
@property
def goal_reached(self):
return self.amount_funded >= self.goal
@property
def over(self):
        return datetime.now() > self.deadline
def get_absolute_url(self):
return reverse('project', args=[str(self.id)])
def __str__(self):
return self.name
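# Editor's note (illustrative, not part of the original app): for a Project with
# goal=1000 and two Funding rows of 250 and 500, amount_funded evaluates to 750,
# percent_funded to round((750 / 1000) * 100) == 75, and goal_reached to False.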
class Funding(models.Model):
project = models.ForeignKey(Project, related_name="fundings")
user = models.ForeignKey(User, related_name="fundings")
amount = models.PositiveIntegerField()
funded_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{} by {}'.format(currency_filter(self.amount), self.user.get_full_name()) | apache-2.0 | -5,548,570,505,306,578,000 | 30.413333 | 89 | 0.692994 | false |
mithro/googleplus2wordpress | tests.py | 1 | 12594 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test module for plus.py,
You can run the test by running:
$ python -m tests
"""
__author__ = '[email protected]'
import json
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
from mock import patch, MagicMock, Mock
class TestGooglePost(unittest.TestCase):
maxDiff = None
def setUp(self):
self.oauth2client_mock = MagicMock()
self.oauth2client_mock.flow_from_clientsecrets = Mock()
self.config = MagicMock()
self.config.EMBEDLY_KEY = ''
modules = {
'oauth2client.client': self.oauth2client_mock,
'oauth2client.flow_from_clientsecrets':
self.oauth2client_mock.flow_from_clientsecrets,
'config': self.config
}
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
self.maxDiff = None
def tearDown(self):
self.module_patcher.stop()
def load_data(self, filename, type="json"):
"""
Load the file data, from json into gdata.
"""
file_ = open(os.path.join(
os.path.dirname(__file__),
"test_documents", filename))
content = file_.read()
file_.close()
if type == 'json':
return json.loads(content)
return content
def do_test_equal(self, post_class, filename, result,
method=None, equal_function='assertEqual'):
"""Helper for test equal"""
gdata = self.load_data(filename)
gid = ''
post = post_class(gid, gdata)
post.render()
result_tmpl = getattr(post, method, 'ERROR')() if method else post.content.strip()
getattr(self, equal_function, None)(
result,
result_tmpl)
def mock_embedly(self, expected_return_value):
"""Mock embedly object"""
import plus
if not isinstance(expected_return_value, (list, tuple)):
expected_return_value = [expected_return_value, ]
plus.OEMBED_CONSUMER = MagicMock()
embed = MagicMock()
embed.getData = MagicMock(side_effect=expected_return_value)
plus.OEMBED_CONSUMER.embed.return_value = embed
class TestPhoto(TestGooglePost):
def test_photo_from_google_plus(self):
from plus import PhotoPost
        # we need to strip, since render() adds surrounding whitespace
result = """<img class="alignnone" src="https://images0-focus-opensocial.googleusercontent.com/gadgets/proxy?container=focus&gadget=a&resize_h=100&url=https%3A%2F%2Flh5.googleusercontent.com%2F-YhGQ2IKWJok%2FUDR4WL8APXI%2FAAAAAAAAAOI%2FdjbWuClePMk%2Fs0-d%2F14-05-07_1132.jpg" alt="">"""
self.mock_embedly({})
self.do_test_equal(PhotoPost, 'sample_pic_without_content.json', result)
def test_photo_from_picasa_web(self):
from plus import PhotoPost
result = """<img class="alignnone" src="https://images0-focus-opensocial.googleusercontent.com/gadgets/proxy?container=focus&gadget=a&resize_h=100&url=https%3A%2F%2Flh6.googleusercontent.com%2F-D0vjgEuIKFM%2FT-rhhY-iBbI%2FAAAAAAAAIJw%2FSUL6I7p1Sh4%2Fw288-h288%2FSkyline%252BWinterfest.jpg" alt="">"""
self.mock_embedly({})
self.do_test_equal(PhotoPost, 'sample_picasa.json', result)
def test_photo_from_flickr(self):
from plus import PhotoPost
result = """<img class="alignnone" src="http://farm8.staticflickr.com/7061/6987228783_2b951598c9_s.jpg" alt="Infinity London Underground EXPLORED #1 My Top 40 Click Best viewed hereClick Please check out my new group City and Architecture No images or links in comments, many thanks!!!">"""
self.mock_embedly(self.load_data('embedly_flickr.json'))
self.do_test_equal(PhotoPost, 'sample_pic_flickr_without_content.json', result)
def test_photo_from_smugmug(self):
from plus import PhotoPost
result = """<img class="alignnone" src="http://fotoeffects.smugmug.com/Daily-shots-for-the-dailies/Dailies/i-VNkmwF6/0/M/DSC6450-M.jpg" alt="">"""
self.mock_embedly(self.load_data('embedly_smugmug.json'))
self.do_test_equal(PhotoPost, 'sample_smugmug.json', result)
class TestVideo(TestGooglePost):
def test_video_youtube(self):
from plus import VideoPost as Post
self.do_test_equal(Post,
'sample_video_youtube.json',
'http://www.youtube.com/watch?v=SF1Tndsfobc')
def test_video_blip_tv(self):
from plus import VideoPost as Post
self.do_test_equal(Post,
'sample_video_blip.json',
'http://blip.tv/pycon-us-videos-2009-2010-2011/pycon-2011-python-ides-panel-4901374')
def test_video_vimeo(self):
from plus import VideoPost as Post
self.do_test_equal(Post,
'sample_video_vimeo.json',
'http://www.vimeo.com/20743963')
class TestMultiple(TestGooglePost):
def test_multiple_photos(self):
from plus import GalleryPost as Post
self.mock_embedly(self.load_data('embedly_multiple_photos.json', type='json'))
result = self.load_data("result_multiple_photos.html", type='html')
self.do_test_equal(Post, 'sample_multi_img.json', result, equal_function='assertMultiLineEqual')
def test_multiple_videos(self):
from plus import GalleryPost as Post
self.mock_embedly(self.load_data("embedly_multiple_videos.json", type='json'))
result = self.load_data('result_multiple_videos.html', type='html')
self.do_test_equal(Post, 'sample_multi_vid.json', result, equal_function='assertMultiLineEqual')
def test_single_linked(self):
from plus import WebPagePost
self.mock_embedly(self.load_data('embedly_single_linked.json', type='json'))
result = self.load_data('result_single_linked.html', type='html')
self.do_test_equal(WebPagePost, 'sample_webpage.json', result, equal_function='assertMultiLineEqual')
def test_multiple_photo_video(self):
from plus import GalleryPost as Post
self.mock_embedly(self.load_data('embedly_multi_photo_video.json', type='json'))
result = self.load_data('result_multi_photo_video.html', type="html")
self.do_test_equal(Post, 'sample_photo_video_content.json', result, equal_function='assertMultiLineEqual')
class TestPhotoContent(TestGooglePost):
def test_photo_from_google_plus(self):
from plus import PhotoPost
result = """<img class="alignnone" src="https://images0-focus-opensocial.googleusercontent.com/gadgets/proxy?container=focus&gadget=a&resize_h=100&url=https%3A%2F%2Flh3.googleusercontent.com%2F-pO-hpo7EM7E%2FTv55RUxDaUI%2FAAAAAAAAAMk%2FW3HP0NZUdjg%2Fw288-h288%2Fcrop.png" alt="">"""
self.mock_embedly({})
self.do_test_equal(PhotoPost, 'sample_pic_with_content.json', result)
def test_photo_from_picasa_web(self):
from plus import PhotoPost
result = """<img class="alignnone" src="https://images0-focus-opensocial.googleusercontent.com/gadgets/proxy?container=focus&gadget=a&resize_h=100&url=https%3A%2F%2Flh5.googleusercontent.com%2F-B-U72k6hExM%2FUAHNU_bEb4I%2FAAAAAAAAHDg%2FhxWdDTvWnNY%2Fw288-h288%2F11-C-611%252B%2525281%252529.jpg" alt="">"""
self.mock_embedly({})
self.do_test_equal(PhotoPost, 'sample_picasa_with_content.json', result)
def test_photo_from_flickr(self):
from plus import PhotoPost
result = """<img class="alignnone" src="http://farm8.staticflickr.com/7061/6987228783_2b951598c9_s.jpg" alt="Infinity London Underground EXPLORED #1 My Top 40 Click Best viewed hereClick Please check out my new group City and Architecture No images or links in comments, many thanks!!!">"""
#self.mock_embedly({})
self.mock_embedly(self.load_data('embedly_flickr_with_content.json'))
self.do_test_equal(PhotoPost, 'sample_pic_flickr_with_content.json', result)
def test_photo_from_smugmug(self):
from plus import PhotoPost
result = """<img class="alignnone" src="http://fotoeffects.smugmug.com/Daily-shots-for-the-dailies/Dailies/i-VNkmwF6/0/M/DSC6450-M.jpg" alt="">"""
self.mock_embedly(self.load_data('embedly_smug_mug_with_content.json'))
self.do_test_equal(PhotoPost, 'sample_smugmug_with_content.json', result)
class TestVideoContent(TestGooglePost):
def test_video_youtube(self):
from plus import VideoPost as Post
self.do_test_equal(Post,
'sample_video_youtube_with_content.json',
'http://www.youtube.com/watch?v=YcFHeTaS9ew')
def test_video_blip_tv(self):
from plus import VideoPost as Post
self.do_test_equal(Post,
'sample_video_blip_with_content.json',
'http://blip.tv/pycon-us-videos-2009-2010-2011/pycon-2011-hidden-treasures-in-the-standard-library-4901130')
def test_video_vimeo(self):
from plus import VideoPost as Post
self.do_test_equal(Post,
'sample_video_vimeo_with_content.json',
'http://www.vimeo.com/1622823')
class TestMultipleContent(TestGooglePost):
def test_multiple_photos(self):
from plus import GalleryPost as Post
self.mock_embedly(self.load_data('embedly_multiple_photos_content.json'))
result = self.load_data('result_multiple_photos_with_content.html', type='html')
self.do_test_equal(Post, 'sample_multi_img_with_content.json', result, equal_function='assertMultiLineEqual')
def test_multiple_videos(self):
from plus import GalleryPost as Post
self.mock_embedly(self.load_data('embedly_multiple_videos_content.json'))
result = self.load_data('result_multiple_videos_content.html', type='html')
self.do_test_equal(Post, 'sample_multi_vid.json', result, equal_function='assertMultiLineEqual')
def test_single_linked(self):
from plus import WebPagePost
result = self.load_data('result_single_linked_content.html', type="html")
self.mock_embedly(self.load_data('embedly_linked_content.json'))
self.do_test_equal(WebPagePost, 'sample_webpage_with_content.json', result, equal_function='assertMultiLineEqual')
class TestShare(TestGooglePost):
def test_share(self):
from plus import TextPost
gdata = self.load_data('sample_share.json')
post = TextPost('', gdata)
self.assertTrue(gdata['object'].get('id', '') != '')
        self.assertIsNotNone(gdata['annotation'])
def test_linked_share(self):
from plus import TextPost
gdata = self.load_data('sample_link_share.json')
post = TextPost('', gdata)
post.render()
self.assertIsNotNone(gdata['object']['id'])
self.assertEqual('', gdata['annotation'])
def test_pic_share(self):
from plus import TextPost
gdata = self.load_data('sample_pic_share.json')
post = TextPost('', gdata)
post.render()
self.assertIsNotNone(gdata['object']['id'])
self.assertEqual('', gdata['annotation'])
def test_video_share(self):
from plus import TextPost
gdata = self.load_data('sample_video_share.json')
post = TextPost('', gdata)
post.render()
self.assertIsNotNone(gdata['object']['id'])
self.assertEqual('', gdata['annotation'])
class TestUtils(TestGooglePost):
def test_title_generation(self):
from plus import WebPagePost
self.mock_embedly([{'title': "From mock"}])
gdata = self.load_data('sample_webpage.json')
post = WebPagePost('', gdata)
post.render()
self.assertMultiLineEqual("""From mock""", post.title)
class TestGeocode(TestGooglePost):
def test_post(self):
from plus import PhotoPost
result = self.load_data('result_geocode.html', type='html')
self.mock_embedly({})
self.do_test_equal(PhotoPost, 'sample_pic_with_geocode.json', result, 'render_geocode')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,529,921,760,208,876,000 | 39.365385 | 314 | 0.666905 | false |
JeffRoy/mi-dataset | mi/dataset/driver/flord_l_wfp/sio/test/test_flord_l_wfp_sio_telemetered_driver.py | 1 | 1077 | #!/home/mworden/uframes/ooi/uframe-1.0/python/bin/python
__author__ = 'mworden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
from mi.dataset.driver.flord_l_wfp.sio.flord_l_wfp_sio_telemetered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class DriverTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
sourceFilePath = os.path.join('mi','dataset','driver','flord_l_wfp','sio','resource',
'node58p1_0.we_wfp.dat')
particle_data_hdlr_obj = ParticleDataHandler()
particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePath, particle_data_hdlr_obj)
log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
self.assertEquals(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one() | bsd-2-clause | -2,796,201,758,248,631,000 | 24.666667 | 99 | 0.654596 | false |
dpapathanasiou/intelligent-smtp-responder | utils/email_utils.py | 1 | 2452 | #!/usr/bin/python
"""
Utilities for processing the parsed email output of messages sent through the smtp server
"""
import re
from lxml import html as lxml_html
from config import pass_through_mailboxes, action_mailboxes, default_mailbox
from rfc_822_email_address_validator import is_valid_email_address
def valid_email_address (email_address):
"""Confirm that the email address provided is of a valid format"""
result = False
if email_address:
if is_valid_email_address(email_address.strip('<>')):
result = True
return result
def get_email_address (s):
"""Parse out the first email address found in the string and return it"""
for token in s.split():
if token.find('@') > -1:
# token will be in the form:
# 'FROM:<[email protected]>' or 'TO:<[email protected]>'
# and with or without the <>
for email_part in token.split(':'):
if email_part.find('@') > -1:
return email_part.strip('<>')
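# Example (editor's note): get_email_address('MAIL FROM:<[email protected]>')
# returns '[email protected]' -- the token is split on ':' and the part containing
# '@' is stripped of angle brackets.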
def domain_recipients_valid (domain_recipients=[]):
"""Confirm that the first email recipient @smtp_server_domain could correspond to a valid project (i.e., it a new project or an int) and return it"""
result = None
try:
if domain_recipients[0] in action_mailboxes.keys() \
or domain_recipients[0] in pass_through_mailboxes \
or default_mailbox is not None:
result = domain_recipients[0]
except IndexError:
pass
return result
subject_prefix_pattern = re.compile('^(Fwd?|Re)(\[?[0-9]*\]?):', re.IGNORECASE) # matches Fwd:/Fw:/Re:/Re[4]: prefixes
def get_base_subject (subject_string):
"""Strip all forward/reply prefixes from the subject and return the base string"""
if not subject_prefix_pattern.search(subject_string):
# there are no Fwd:/Fw:/Re:/Re[4]: prefixes so just return the string as-is
return subject_string
else:
# Strip off the first layer of Fwd:/Fw:/Re:/Re[4]: prefixes, and pass it through again,
# to handle cases such as 'Re: Re[4]: Re:'
return get_base_subject(subject_prefix_pattern.sub('', subject_string).lstrip())
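# Example (editor's note): get_base_subject('Re: Fwd: Re[4]: Quarterly report')
# strips one prefix per recursive call and ultimately returns 'Quarterly report'.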
def get_text_from_html (html_string):
"""Return the text from an html string -- needed for cases where there is no 'body' returned from parse(), only 'html'"""
document = lxml_html.document_fromstring(html_string)
return document.text_content()
| mit | -4,512,159,627,556,549,000 | 38.548387 | 153 | 0.650489 | false |
davidrpugh/pyAM | pyam/inputs.py | 1 | 9450 | """
Classes for modeling heterogenous factors of production.
@author : David R. Pugh
@date : 2015-08-06
"""
from __future__ import division
import collections
import numpy as np
from scipy import optimize, special
import sympy as sym
class Input(object):
"""Class representing a heterogenous production input."""
_modules = [{'ImmutableMatrix': np.array, 'erf': special.erf}, 'numpy']
__numeric_cdf = None
__numeric_pdf = None
def __init__(self, var, cdf, bounds, params, alpha=None, measure=1.0):
"""
Create an instance of the Input class.
Parameters
----------
var : sym.Symbol
Symbolic variable representing the production input.
cdf : sym.Basic
Symbolic expression defining a valid probability distribution
function (CDF). Must be a function of var.
bounds : (float, float)
Tuple of floats that should bracket the desired quantile, alpha.
params : dict
Dictionary of distribution parameters.
alpha : float, optional (default=None)
Quantile defining the lower bound on the support of the cumulative
distribution function.
measure : float
The measure of available units of the input.
"""
self.var = var
self.measure = measure # needs to be assigned before cdf is set!
self.cdf = cdf
self.alpha = alpha # needs to be assigned before params are set!
self.lower = bounds[0]
self.upper = bounds[1]
self.params = params
@property
def _numeric_cdf(self):
"""
Vectorized function used to numerically evaluate the CDF.
:getter: Return the lambdified CDF.
:type: function
"""
if self.__numeric_cdf is None:
args = [self.var] + sym.var(list(self.params.keys()))
self.__numeric_cdf = sym.lambdify(args, self.cdf, self._modules)
return self.__numeric_cdf
@property
def _numeric_pdf(self):
"""
Vectorized function used to numerically evaluate the pdf.
:getter: Return the lambdified pdf.
:type: function
"""
if self.__numeric_pdf is None:
args = [self.var] + sym.var(list(self.params.keys()))
self.__numeric_pdf = sym.lambdify(args, self.pdf, self._modules)
return self.__numeric_pdf
@property
def cdf(self):
"""
Cumulative distribution function (CDF).
:getter: Return the current distribution function.
:setter: Set a new distribution function.
:type: sym.Basic
"""
return self._cdf
@cdf.setter
def cdf(self, value):
"""Set a new cumulative distribution function (CDF)."""
self._cdf = self.measure * self._validate_cdf(value) # rescale cdf!
@property
def lower(self):
"""
Lower bound on support of the cumulative distribution function (CDF).
:getter: Return the lower bound.
:setter: Set a new lower bound.
:type: float
"""
return self._lower
@lower.setter
def lower(self, value):
"""Set a new lower bound."""
self._lower = self._validate_lower_bound(value)
@property
def norm_constant(self):
"""
Constant used to normalize the probability density function (pdf).
:getter: Return the current normalization constant.
:type: float
"""
return self.evaluate_cdf(self.upper) - self.evaluate_cdf(self.lower)
@property
def measure(self):
"""
        The measure of available units of the input.
:getter: Return the measure.
:setter: Set a new measure.
:type: float
"""
return self._measure
@measure.setter
def measure(self, value):
"""Set a new lower bound."""
self._measure = self._validate_measure(value)
@property
def params(self):
"""
Dictionary of distribution parameters.
:getter: Return the current parameter dictionary.
:setter: Set a new parameter dictionary.
:type: dict
"""
return self._params
@params.setter
def params(self, value):
"""Set a new parameter dictionary."""
valid_params = self._validate_params(value)
self._params = self._order_params(valid_params)
self._update_bounds(self.lower, self.upper)
@property
def pdf(self):
"""
Probability density function (pdf).
:getter: Return the current probability density function.
:type: sym.Basic
"""
return sym.diff(self.cdf, self.var)
@property
def upper(self):
"""
Upper bound on support of the cumulative distribution function (CDF).
        :getter: Return the upper bound.
        :setter: Set a new upper bound.
:type: float
"""
return self._upper
@upper.setter
def upper(self, value):
"""Set a new upper bound."""
self._upper = self._validate_upper_bound(value)
@property
def var(self):
"""
        Symbolic variable representing the production input.
:getter: Return the current variable.
:setter: Set a new variable.
:type: sym.Symbol
"""
return self._var
@var.setter
def var(self, value):
"""Set a new symbolic variable."""
self._var = self._validate_var(value)
@staticmethod
def _order_params(params):
"""Cast a dictionary to an order dictionary."""
return collections.OrderedDict(sorted(params.items()))
@staticmethod
def _validate_cdf(cdf):
"""Validates the probability distribution function (CDF)."""
if not isinstance(cdf, sym.Basic):
mesg = "Attribute 'cdf' must have type sympy.Basic, not {}"
raise AttributeError(mesg.format(cdf.__class__))
else:
return cdf
@staticmethod
def _validate_measure(value):
"""Validate the measure of available input."""
if not isinstance(value, float):
mesg = "Attribute 'measure' must be a float, not {}"
raise AttributeError(mesg.format(value.__class__))
elif value < 0:
mesg = "Attribute 'measure' attribute must be strictly positive."
raise AttributeError(mesg)
else:
return value
@staticmethod
def _validate_params(value):
"""Validate the dictionary of parameters."""
if not isinstance(value, dict):
mesg = "Attribute 'params' must have type dict, not {}"
raise AttributeError(mesg.format(value.__class__))
else:
return value
@staticmethod
def _validate_var(var):
"""Validates the symbolic variable."""
if not isinstance(var, sym.Symbol):
mesg = "Attribute 'var' must have type sympy.Symbol, not {}"
raise AttributeError(mesg.format(var.__class__))
else:
return var
def _validate_upper_bound(self, value):
"""Validate the upper bound on the suppport of the CDF."""
if not isinstance(value, float):
mesg = "Attribute 'upper' must have type float, not {}"
raise AttributeError(mesg.format(value.__class__))
else:
return value
def _find_bound(self, alpha, lower):
"""Find the alpha quantile of the CDF."""
return optimize.newton(self._inverse_cdf, lower, args=(alpha,))
def _inverse_cdf(self, x, alpha):
"""Inverse CDF used to identify the lower and upper bounds."""
return self.evaluate_cdf(x) - alpha
def _update_bounds(self, lower, upper):
if self.alpha is not None:
self.lower = self._find_bound(self.alpha * self.measure, lower)
self.upper = self._find_bound((1 - self.alpha) * self.measure, upper)
def _validate_lower_bound(self, value):
"""Validate the lower bound on the suppport of the CDF."""
if not isinstance(value, float):
mesg = "Attribute 'lower' must have type float, not {}"
raise AttributeError(mesg.format(value.__class__))
else:
return value
def evaluate_cdf(self, value):
"""
Numerically evaluate the cumulative distribution function (CDF).
Parameters
----------
value : numpy.ndarray
Values at which to evaluate the CDF.
Returns
-------
out : numpy.ndarray
Evaluated CDF.
"""
out = self._numeric_cdf(value, *self.params.values())
return out
def evaluate_pdf(self, value, norm=False):
"""
Numerically evaluate the probability density function (pdf).
Parameters
----------
value : numpy.ndarray
Values at which to evaluate the pdf.
norm : boolean (default=False)
True if you wish to normalize the pdf so that it integrates to one;
False otherwise.
Returns
-------
out : numpy.ndarray
Evaluated pdf.
"""
if norm:
out = (self._numeric_pdf(value, *self.params.values()) /
self.norm_constant)
else:
out = self._numeric_pdf(value, *self.params.values())
return out
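# --- Editor's usage sketch (not part of the original module) -----------------
# A minimal, hedged example of constructing an Input; the lognormal CDF and all
# parameter values below are illustrative assumptions, not pyAM defaults.
#
#     import sympy as sym
#
#     x, mu, sigma = sym.var('x, mu, sigma')
#     lognormal_cdf = 0.5 + 0.5 * sym.erf((sym.log(x) - mu) /
#                                         sym.sqrt(2 * sigma**2))
#     workers = Input(var=x, cdf=lognormal_cdf, bounds=(1e-2, 1e2),
#                     params={'mu': 0.0, 'sigma': 1.0}, alpha=0.005)
#     density = workers.evaluate_pdf(1.0, norm=True)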
| mit | 744,981,528,001,345,400 | 28.256966 | 81 | 0.580529 | false |
Azure/azure-sdk-for-python | sdk/agrifood/azure-agrifood-farming/azure/agrifood/farming/aio/operations/_seasons_operations.py | 1 | 18589 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SeasonsOperations:
"""SeasonsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.agrifood.farming.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
min_start_date_time: Optional[datetime.datetime] = None,
max_start_date_time: Optional[datetime.datetime] = None,
min_end_date_time: Optional[datetime.datetime] = None,
max_end_date_time: Optional[datetime.datetime] = None,
years: Optional[List[int]] = None,
ids: Optional[List[str]] = None,
names: Optional[List[str]] = None,
property_filters: Optional[List[str]] = None,
statuses: Optional[List[str]] = None,
min_created_date_time: Optional[datetime.datetime] = None,
max_created_date_time: Optional[datetime.datetime] = None,
min_last_modified_date_time: Optional[datetime.datetime] = None,
max_last_modified_date_time: Optional[datetime.datetime] = None,
max_page_size: Optional[int] = 50,
skip_token: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.SeasonListResponse"]:
"""Returns a paginated list of season resources.
:param min_start_date_time: Minimum season start datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
:type min_start_date_time: ~datetime.datetime
:param max_start_date_time: Maximum season start datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
:type max_start_date_time: ~datetime.datetime
:param min_end_date_time: Minimum season end datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
:type min_end_date_time: ~datetime.datetime
:param max_end_date_time: Maximum season end datetime, sample format: yyyy-MM-ddTHH:mm:ssZ.
:type max_end_date_time: ~datetime.datetime
:param years: Years of the resource.
:type years: list[int]
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SeasonListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.agrifood.farming.models.SeasonListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SeasonListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if min_start_date_time is not None:
query_parameters['minStartDateTime'] = self._serialize.query("min_start_date_time", min_start_date_time, 'iso-8601')
if max_start_date_time is not None:
query_parameters['maxStartDateTime'] = self._serialize.query("max_start_date_time", max_start_date_time, 'iso-8601')
if min_end_date_time is not None:
query_parameters['minEndDateTime'] = self._serialize.query("min_end_date_time", min_end_date_time, 'iso-8601')
if max_end_date_time is not None:
query_parameters['maxEndDateTime'] = self._serialize.query("max_end_date_time", max_end_date_time, 'iso-8601')
if years is not None:
query_parameters['years'] = [self._serialize.query("years", q, 'int') if q is not None else '' for q in years]
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SeasonListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/seasons'} # type: ignore
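    # Editor's note (not generated code): a typical way to consume the pager
    # above, assuming an authenticated async FarmBeatsClient named `client`:
    #     async for season in client.seasons.list(years=[2021]):
    #         print(season.id)
    # The client class name and attribute access are assumptions based on the
    # azure-agrifood-farming package layout.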
async def get(
self,
season_id: str,
**kwargs: Any
) -> "_models.Season":
"""Gets a specified season resource.
:param season_id: ID of the season.
:type season_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Season, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Season
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Season"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'seasonId': self._serialize.url("season_id", season_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Season', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/seasons/{seasonId}'} # type: ignore
async def create_or_update(
self,
season_id: str,
season: Optional["_models.Season"] = None,
**kwargs: Any
) -> "_models.Season":
"""Creates or updates a season resource.
:param season_id: ID of the season resource.
:type season_id: str
:param season: Season resource payload to create or update.
:type season: ~azure.agrifood.farming.models.Season
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Season, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Season
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Season"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
content_type = kwargs.pop("content_type", "application/merge-patch+json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'seasonId': self._serialize.url("season_id", season_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if season is not None:
body_content = self._serialize.body(season, 'Season')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('Season', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Season', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/seasons/{seasonId}'} # type: ignore
async def delete(
self,
season_id: str,
**kwargs: Any
) -> None:
"""Deletes a specified season resource.
:param season_id: ID of the season.
:type season_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'seasonId': self._serialize.url("season_id", season_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/seasons/{seasonId}'} # type: ignore
| mit | -3,638,587,050,284,170,000 | 49.651226 | 162 | 0.626446 | false |
cpatrick/comic-django | django/comicsite/middleware/project.py | 1 | 2748 | from django.core.urlresolvers import resolve,Resolver404
from django.conf import settings
from comicmodels.models import ComicSite,get_projectname
class ProjectMiddleware:
""" Everything you do on comicframework is related to a project. This
middleware makes this possible without having to duplicate 'project'
variables everywhere.
"""
def process_request(self, request):
""" Adds current project name to any request so it can be easily used
in views.
"""
request.is_projectadmin = self.is_projectadmin_url(request)
request.projectname = self.get_project_name(request)
request.project_pk = self.get_project_pk(request)
def get_project_name(self,request):
""" Tries to infer the name of the project this project is regarding
"""
try:
resolution = resolve(request.path)
if resolution.kwargs.has_key("site_short_name"):
projectname = resolution.kwargs["site_short_name"]
elif resolution.kwargs.has_key("project_name"):
projectname = resolution.kwargs["project_name"]
elif request.is_projectadmin:
projectname = get_projectname(resolution.namespace)
else:
projectname = settings.MAIN_PROJECT_NAME
except Resolver404:
projectname = settings.MAIN_PROJECT_NAME
return projectname
def get_project_pk(self,request):
""" Get unique key of current comicsite. This is used in admin views to
auto fill comicsite for any comicsitemodel
"""
try:
project_pk = ComicSite.objects.get(short_name=request.projectname).pk
except ComicSite.DoesNotExist:
project_pk = -1
return project_pk
def is_projectadmin_url(self,request):
""" When you are in admin for a single project, only show objects for
this project. This check must be made here as the request cannot
be modified later
"""
is_projectadmin = False
try:
resolution = resolve(request.path)
# urls.py is set up in such a way that 'namespace' is always 'xxxadmin'
            # for example it is 'vessel12admin' for the vessel12 admin. It is only
# 'admin' for the main (root) admin site.
is_projectadmin = resolution.namespace != 'admin' and self.is_admin_page(resolution)
except Resolver404:
is_projectadmin = False
return is_projectadmin
def is_admin_page(self,resolution):
return resolution.app_name == 'admin'
| apache-2.0 | -7,650,004,721,259,196,000 | 36.148649 | 96 | 0.61099 | false |
codeforamerica/naics-api | data/inverse-index.py | 1 | 6786 | # -*- coding: utf-8 -*-
import json
from sys import argv
from re import compile
from collections import defaultdict
import inflect
p = inflect.engine()
stop_words = '''a, about, above, after, again, against, all, am, an, and, any,
are, aren't, as, at, be, because, been, before, being, below, between, both,
but, by, can't, cannot, could, couldn't, did, didn't, do, does, doesn't, doing,
don't, down, during, each, few, for, from, further, had, hadn't, has, hasn't,
have, haven't, having, he, he'd, he'll, he's, her, here, here's, hers, herself,
him, himself, his, how, how's, i, i'd, i'll, i'm, i've, if, in, into, is,
isn't, it, it's, its, itself, let's, me, more, most, mustn't, my, myself, no,
nor, not, of, off, on, once, only, or, other, ought, our, ours , ourselves,
out, over, own, same, shan't, she, she'd, she'll, she's, should, shouldn't, so,
some, such, than, that, that's, the, their, theirs, them, themselves, then,
there, there's, these, they, they'd, they'll, they're, they've, this, those,
through, to, too, under, until, up, very, was, wasn't, we, we'd, we'll, we're,
we've, were, weren't, what, what's, when, when's, where, where's, which, while,
who, who's, whom, why, why's, with, won't, would, wouldn't, you, you'd, you'll,
you're, you've, your, yours, yourself, yourselves'''
stop_words += ', except'
whatever = compile(r'\S+')
breaks = compile(r'[\s\/\-\xa0]')
stops = compile(r',\s+').split(stop_words)
junk_ = compile(u'^[\s\;\:\,\.\!\?\(\)\"\'…“”‘’]+')
_junk = compile( u'[\s\;\:\,\.\!\?\(\)\"\'…“”‘’]+$')
# Adapted from http://stackoverflow.com/a/10297152/336353
# plurals = compile(r'(?<![aei])([ie][d])(?=[^a-zA-Z])|(?<=[ertkgwmnl])s$')
def cleanup(word):
''' Clean up a single word if it's got some obvious junk on it.
'''
word = word.replace(' ', ' ')
word = junk_.sub('', _junk.sub('', word))
word = word.lower().replace(u'’', "'")
return word
def tokenize(text):
''' Tokenize a block of text into a set of search terms.
'''
#
# Ignore "except" language
#
try:
start = text.index('(except')
end = text.index(')', start) + 1
if end > start:
text = text.replace(text[start:end], '')
except:
pass
#
# Clean up words and group them into ones, twos and threes.
#
words = map(cleanup, breaks.split(text))
word_singles = [word for word in words if word not in stops]
word_pairs = [(w1+' '+w2).strip() for (w1, w2)
in zip(words, words[1:])
if not (w1 in stops or w2 in stops)]
word_triplets = [(w1+' '+w2+' '+w3).strip() for (w1, w2, w3)
in zip(words, words[1:], words[2:])
if not (w1 in stops or w3 in stops)]
words_altogether = word_singles + word_pairs #+ word_triplets
words_singular = [p.singular_noun(s) or s for s in words_altogether]
# Include original and singular variants of words
word_set = filter(None, set(words_singular + words_altogether))
return word_set
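# Example (editor's note): tokenize('Office building construction') yields the
# cleaned singles ('office', 'building', 'construction'), the adjacent pairs
# ('office building', 'building construction'), plus whatever singular variants
# inflect derives from them; triplets are currently commented out above.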
def gen_terms(filename):
''' Generate tuples of (code, score, search term) for NAICS items in a file.
{
"code":236220,
"title":"Commercial and Institutional Building Construction",
"index":
[
"Addition, alteration and renovation for-sale builders, commercial and institutional building",
"Addition, alteration and renovation for-sale builders, commercial warehouse",
...
],
"examples":
[
"Airport building construction",
"Office building construction",
...
],
"description":
[
"This industry comprises establishments primarily responsible for the construction (including new work, additions, alterations, maintenance, and repairs) of commercial and institutional buildings and related structures, such as stadiums, grain elevators, and indoor swimming facilities. This industry includes establishments responsible for the on-site assembly of modular or prefabricated commercial and institutional buildings. Included in this industry are commercial and institutional building general contractors, commercial and institutional building for-sale builders, commercial and institutional building design-build firms, and commercial and institutional building project construction management firms."
],
"crossrefs":
[
{"text":"Constructing structures that are integral parts of utility systems (e.g., storage tanks, pumping stations) or are used to produce products for these systems (e.g., power plants, refineries)--are classified in Industry Group 2371, Utility System Construction, based on type of construction project;","code":"2371"},
{"text":"Performing specialized construction work on commercial and institutional buildings, generally on a subcontract basis--are classified in Subsector 238, Specialty Trade Contractors; and","code":"238"},
{"text":"Constructing buildings on their own account for rent or lease--are classified in Industry Group 5311, Lessors of Real Estate.","code":"5311"}
],
"seq_no":217
}
'''
data = json.load(open(filename))
fields = 'code', 'title', 'index', 'examples', 'description'
scores = 5, 4, 3, 2, 1
for item in data['items']:
code = item['code']
for (score, field) in zip(scores, fields):
if field not in item:
continue
if field == 'code':
yield (code, score, str(code))
if field == 'title' and field in item:
for word in tokenize(item['title']):
yield (code, score, word)
if field in ('index', 'examples', 'description'):
for word in tokenize(' '.join(item[field])):
yield (code, score, word)
if __name__ == '__main__':
infile, outfile = argv[1:]
with open(outfile, 'w') as out:
#
# index is a dictionary of lists.
# Each list contains tuples of (score, code).
#
index = defaultdict(lambda: [])
for (code, score, word) in gen_terms(infile):
index[word].append((score, code))
#
# output is a dictionary of dicts.
# Each dict is a mapping from NAICS code to score.
#
output = dict()
for (word, results) in index.items():
output[word] = defaultdict(lambda: 0)
for (score, code) in results:
output[word][code] += score
json.dump(output, out, indent=2)
| bsd-3-clause | -5,669,027,410,985,313,000 | 39.261905 | 729 | 0.597132 | false |
tbenthompson/cppimport | cppimport/templating.py | 1 | 2092 | import io
import logging
import os
import mako.exceptions
import mako.lookup
import mako.runtime
import mako.template
logger = logging.getLogger(__name__)
def run_templating(module_data):
module_data["cfg"] = BuildArgs(
sources=[],
include_dirs=[],
extra_compile_args=[],
libraries=[],
library_dirs=[],
extra_link_args=[],
dependencies=[],
parallel=False,
)
module_data["setup_pybind11"] = setup_pybind11
buf = io.StringIO()
ctx = mako.runtime.Context(buf, **module_data)
filepath = module_data["filepath"]
lookup = mako.lookup.TemplateLookup(directories=[os.path.dirname(filepath)])
tmpl = mako.template.Template(filename=filepath, lookup=lookup)
tmpl.render_context(ctx)
rendered_src_filepath = get_rendered_source_filepath(filepath)
with open(rendered_src_filepath, "w", newline="") as f:
f.write(buf.getvalue())
module_data["rendered_src_filepath"] = rendered_src_filepath
class BuildArgs(dict):
"""
This exists for backwards compatibility with old configuration key names.
TODO: Add deprecation warnings to allow removing this sometime in the future.
"""
_key_mapping = {
"compiler_args": "extra_compile_args",
"linker_args": "extra_link_args",
}
def __getitem__(self, key):
return super(BuildArgs, self).__getitem__(self._key_mapping.get(key, key))
def __setitem__(self, key, value):
super(BuildArgs, self).__setitem__(self._key_mapping.get(key, key), value)
def setup_pybind11(cfg):
import pybind11
cfg["include_dirs"] += [pybind11.get_include(), pybind11.get_include(True)]
# Prefix with c++11 arg instead of suffix so that if a user specifies c++14
# (or later!) then it won't be overridden.
cfg["compiler_args"] = ["-std=c++11", "-fvisibility=hidden"] + cfg["compiler_args"]
def get_rendered_source_filepath(filepath):
dirname = os.path.dirname(filepath)
filename = os.path.basename(filepath)
return os.path.join(dirname, ".rendered." + filename)
| mit | -8,459,070,731,217,982,000 | 28.055556 | 87 | 0.656788 | false |
Crystalnix/BitPop | chrome/test/functional/media/media_seek_perf.py | 1 | 4254 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Seek performance testing for <video>.
Calculates the short and long seek times for different video formats on
different network constraints.
"""
import logging
import os
import posixpath
import pyauto_media
import pyauto_utils
import cns_test_base
import worker_thread
# Number of threads to use during testing.
_TEST_THREADS = 3
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_seek.html')
# The media files used for testing.
# Path under CNS root folder (pyauto_private/media).
_TEST_VIDEOS = [posixpath.join('dartmoor', name) for name in
['dartmoor2.ogg', 'dartmoor2.m4a', 'dartmoor2.mp3',
'dartmoor2.wav']]
_TEST_VIDEOS.extend(posixpath.join('crowd', name) for name in
['crowd1080.webm', 'crowd1080.ogv', 'crowd1080.mp4',
'crowd360.webm', 'crowd360.ogv', 'crowd360.mp4'])
# Constraints to run tests on.
_TESTS_TO_RUN = [
cns_test_base.Cable,
cns_test_base.Wifi,
cns_test_base.NoConstraints]
class SeekWorkerThread(worker_thread.WorkerThread):
"""Worker thread. Runs a test for each task in the queue."""
def RunTask(self, unique_url, task):
"""Runs the specific task on the url given.
It is assumed that a tab with the unique_url is already loaded.
Args:
unique_url: A unique identifier of the test page.
task: A (series_name, settings, file_name) tuple to run the test on.
"""
series_name, settings, file_name = task
video_url = cns_test_base.GetFileURL(
file_name, bandwidth=settings[0], latency=settings[1],
loss=settings[2])
# Start the test!
self.CallJavascriptFunc('startTest', [video_url], unique_url)
logging.debug('Running perf test for %s.', video_url)
# Time out is dependent on (seeking time * iterations). For 3 iterations
# per seek we get total of 18 seeks per test. We expect buffered and
# cached seeks to be fast. Through experimentation an average of 10 secs
# per seek was found to be adequate.
if not self.WaitUntil(self.GetDOMValue, args=['endTest', unique_url],
retry_sleep=5, timeout=300, debug=False):
error_msg = 'Seek tests timed out.'
else:
error_msg = self.GetDOMValue('errorMsg', unique_url)
cached_states = self.GetDOMValue(
"Object.keys(CachedState).join(',')", unique_url).split(',')
seek_test_cases = self.GetDOMValue(
"Object.keys(SeekTestCase).join(',')", unique_url).split(',')
graph_name = series_name + '_' + os.path.basename(file_name)
for state in cached_states:
for seek_case in seek_test_cases:
values = self.GetDOMValue(
"seekRecords[CachedState.%s][SeekTestCase.%s].join(',')" %
(state, seek_case), unique_url)
if values:
results = [float(value) for value in values.split(',')]
else:
results = []
pyauto_utils.PrintPerfResult('seek', '%s_%s_%s' %
(state, seek_case, graph_name),
results, 'sec')
if error_msg:
logging.error('Error while running %s: %s.', graph_name, error_msg)
return False
else:
return True
class MediaSeekPerfTest(cns_test_base.CNSTestBase):
"""PyAuto test container. See file doc string for more information."""
def __init__(self, *args, **kwargs):
"""Initialize the CNSTestBase with socket_timeout = 60 secs."""
cns_test_base.CNSTestBase.__init__(self, socket_timeout='60',
*args, **kwargs)
def testMediaSeekPerformance(self):
"""Launches HTML test which plays each video and records seek stats."""
tasks = cns_test_base.CreateCNSPerfTasks(_TESTS_TO_RUN, _TEST_VIDEOS)
if worker_thread.RunWorkerThreads(self, SeekWorkerThread, tasks,
_TEST_THREADS, _TEST_HTML_PATH):
self.fail('Some tests failed to run as expected.')
if __name__ == '__main__':
pyauto_media.Main()
| bsd-3-clause | -1,607,342,288,572,257,800 | 35.050847 | 77 | 0.637988 | false |
arozumenko/locust | locust/resource_tools.py | 1 | 5788 | # Copyright (c) 2014 Artem Rozumenko ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource tools commands."""
from threading import Thread, current_thread, active_count
from time import time, sleep
from random import randrange
from math import sqrt
from multiprocessing import Process, cpu_count, current_process
from os import urandom, remove
from glob import glob
from tempfile import gettempdir
from os.path import join as join_path
from psutil import swap_memory
from locust.common import IS_WINDOWS
from locust.common import message_wrapper, convert_timeout
def burn_cpu(timeout=30):
"""Burn CPU command.
    Start one busy-loop process per CPU core that repeatedly takes square roots of random integers.
Arguments:
:timeout - length of time in seconds to burn cpu (Default: 30 sec)
Return:
feedback
"""
timeout = convert_timeout(timeout, def_timeout=30)
for _ in range(cpu_count()):
thread = Process(target=_burn_cpu, args=[timeout])
thread.start()
return message_wrapper('CPU burning started')
def _burn_cpu(timeout=0):
"""Burn CPU command."""
end_time = time() + timeout
while time() < end_time:
sqrt(float(randrange(1, 999999, 1)))
def burn_ram(timeout=30):
"""RAM overflow command.
    Fill RAM with garbage data.
Arguments:
        :timeout - length of time in seconds to overflow RAM (Default: 30 sec)
Return:
feedback
"""
timeout = convert_timeout(timeout, def_timeout=30)
process = Process(target=_burn_ram, args=[timeout])
process.start()
return message_wrapper('RAM overflowing has been started')
def _burn_ram(timeout):
"""RAM overflow command."""
f_ratio = 100
d_ratio = f_ratio
fill_ram = ''
decrease = ''
spike = ''
# Start RAM overflow
# Try to fill all free RAM space
while True:
try:
fill_ram = ' ' * int((float(swap_memory().free) / 100) * f_ratio)
break
except (MemoryError, OverflowError):
f_ratio -= 1
# Try to fill all left free RAM space (Windows OS specific)
while True:
try:
decrease = ' ' * int((float(swap_memory().free) / 100) * d_ratio)
break
except (MemoryError, OverflowError):
d_ratio -= 1
end_time = time() + timeout
while time() < end_time:
if float(swap_memory().percent) < 90:
try:
spike += ' ' * int((float(swap_memory().free) / 100) * 10)
except (MemoryError, OverflowError):
spike = ''
del fill_ram
del decrease
del spike
def burn_disk(timeout=30, file_size='1k', thread_limit='200'):
"""Burn HDD command.
Arguments:
timeout - length of time in seconds to burn HDD (Default: 30 sec);
file_size - file size to be created in thread;
thread_limit - thread limit count per process;
Return:
Returns message that burn HDD is started.
"""
timeout = convert_timeout(timeout, def_timeout=30)
values = {
'B': 0,
'K': 10,
'M': 20,
}
if file_size.isdigit():
count = file_size
rate = 'B'
else:
rate = file_size[-1:].upper()
count = file_size[:-1]
if not (rate in values and count.isdigit()):
        msg = ('Wrong format of file_size param "{param}". "file_size" '
               'parameter should have the following format: '
               '"<size_in_digit><Multiplier>". Correct values for '
               'multiplier are - {mult}')
        keys = values.keys() + [k.lower() for k in values.keys()]
        raise TypeError(msg.format(param=file_size, mult=' '.join(keys)))
if not thread_limit.isdigit():
raise TypeError('Thread limit parameter should have the following '
'format:"<count_in_digit>"')
file_size = int(int(count) << values[rate])
end_time = time() + timeout
for _ in xrange(cpu_count()):
process = Process(target=_burn_disk,
args=[end_time, file_size, int(thread_limit)])
process.start()
return message_wrapper('HDD burning has been started')
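# Example (editor's note): file_size='1k' is parsed as 1 << 10 == 1024 bytes,
# '4M' as 4 << 20 == 4194304 bytes, and plain digits such as '512' as bytes.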
def _burn_disk(end_time, file_size, thread_limit):
"""Burn HDD command."""
def _start_write():
"""Write data to temp file."""
while time() < end_time:
file_name = current_process().name + '_' + current_thread().name
file_name = join_path(gettempdir(), file_name)
try:
open_file = open(file_name, 'w')
open_file.write(str(urandom(file_size)))
except IOError:
pass
finally:
open_file.close()
if IS_WINDOWS:
overall_file_limit = 16000
else:
import resource
overall_file_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
thread_count = overall_file_limit / cpu_count()
if thread_count > thread_limit:
thread_count = thread_limit
was_threads = active_count()
for _ in xrange(thread_count):
thread = Thread(target=_start_write)
thread.start()
while active_count() > was_threads:
sleep(1)
for each in glob(join_path(gettempdir(), current_process().name + '*')):
remove(each)
| apache-2.0 | -4,284,016,076,011,271,700 | 29.145833 | 77 | 0.607636 | false |
fuzzysteve/yamlloader | Load.py | 1 | 2725 | from sqlalchemy import create_engine,Table
import warnings
import sys
warnings.filterwarnings('ignore', '^Unicode type received non-unicode bind param value')
if len(sys.argv)<2:
print "Load.py destination"
exit()
database=sys.argv[1]
if len(sys.argv)==3:
language=sys.argv[2]
else:
language='en'
import ConfigParser, os
fileLocation = os.path.dirname(os.path.realpath(__file__))
inifile=fileLocation+'/sdeloader.cfg'
config = ConfigParser.ConfigParser()
config.read(inifile)
destination=config.get('Database',database)
sourcePath=config.get('Files','sourcePath')
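# Expected sdeloader.cfg layout (illustrative values, not taken from this repo):
# [Database]
# postgres = postgresql+psycopg2://user:pass@localhost/evesde
# [Files]
# sourcePath = /path/to/sde/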
from tableloader.tableFunctions import *
print "connecting to DB"
engine = create_engine(destination)
connection = engine.connect()
from tableloader.tables import metadataCreator
schema=None
if database=="postgresschema":
schema="evesde"
metadata=metadataCreator(schema)
print "Creating Tables"
metadata.drop_all(engine,checkfirst=True)
metadata.create_all(engine,checkfirst=True)
print "created"
import tableloader.tableFunctions
factions.importyaml(connection,metadata,sourcePath,language)
ancestries.importyaml(connection,metadata,sourcePath,language)
bloodlines.importyaml(connection,metadata,sourcePath,language)
npccorporations.importyaml(connection,metadata,sourcePath,language)
characterAttributes.importyaml(connection,metadata,sourcePath,language)
agents.importyaml(connection,metadata,sourcePath,language)
typeMaterials.importyaml(connection,metadata,sourcePath,language)
dogmaTypes.importyaml(connection,metadata,sourcePath,language)
dogmaEffects.importyaml(connection,metadata,sourcePath,language)
dogmaAttributes.importyaml(connection,metadata,sourcePath,language)
dogmaAttributeCategories.importyaml(connection,metadata,sourcePath,language)
blueprints.importyaml(connection,metadata,sourcePath)
marketGroups.importyaml(connection,metadata,sourcePath,language)
metaGroups.importyaml(connection,metadata,sourcePath,language)
controlTowerResources.importyaml(connection,metadata,sourcePath,language)
categories.importyaml(connection,metadata,sourcePath,language)
certificates.importyaml(connection,metadata,sourcePath)
graphics.importyaml(connection,metadata,sourcePath)
groups.importyaml(connection,metadata,sourcePath,language)
icons.importyaml(connection,metadata,sourcePath)
skins.importyaml(connection,metadata,sourcePath)
types.importyaml(connection,metadata,sourcePath,language)
planetary.importyaml(connection,metadata,sourcePath,language)
bsdTables.importyaml(connection,metadata,sourcePath)
universe.importyaml(connection,metadata,sourcePath)
universe.buildJumps(connection,database)
volumes.importVolumes(connection,metadata,sourcePath)
universe.fixStationNames(connection,metadata)
| mit | -348,006,879,507,055,800 | 27.385417 | 88 | 0.839633 | false |
jvrsantacruz/upversion | tests/__init__.py | 1 | 1934 | # -*- coding: utf-8 -*-
import os
import logging
import traceback
import contextlib
from click.testing import CliRunner
from hamcrest import (assert_that, all_of, has_property, has_properties,
anything)
from upversion import cli
logger = logging.getLogger('tests.cli')
class CommandTest(object):
def setup(self):
self.setup_fixture()
self.setup_runner()
def setup_fixture(self):
pass
def setup_runner(self):
self.runner = CliRunner()
self.result = None
def run(self, args, **kwargs):
logger.info(u'run: upversion' + u' '.join(args))
# Override default process environ
kwargs['env'] = kwargs.get('env', getattr(self, 'env', None))
# Add extra arguments by default
if hasattr(self, 'defargs'):
args += self.defargs
with clean_environ():
self.result = self.runner.invoke(cli, args, **kwargs)
if self.result.exit_code:
logger.info(u'error result: \n' + self.result.output)
if self.result.exception:
logger.info(u'exception raised: \n' +
u''.join(traceback.format_exception(*self.result.exc_info)))
return self.result
def assert_result(self, *matchers, **kwargs):
result = kwargs.get('result', self.result)
assert_that(result, all_of(
has_property('exit_code', kwargs.pop('exit_code', 0)),
has_property('output', kwargs.pop('output', anything())),
has_properties(**kwargs),
*matchers
))
def here(*parts):
return os.path.join(os.path.realpath(os.path.dirname(__file__)), *parts)
@contextlib.contextmanager
def clean_environ():
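"""Temporarily remove UPVERSION_* variables from os.environ.
The original environment is restored once the context exits.
"""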
env = dict(os.environ)
for key in list(os.environ):
if key.startswith('UPVERSION_'):
del os.environ[key]
try:
yield
finally:
os.environ.update(env)
| gpl-2.0 | 6,734,251,735,488,621,000 | 25.493151 | 76 | 0.597208 | false |
mementum/bfplusplus | bfplusplus/gui/ProgressDialog.py | 1 | 2589 | #!/usr/bin/env python
# -*- coding: latin-1; py-indent-offset:4 -*-
################################################################################
#
# This file is part of Bfplusplus
#
# Bfplusplus is a graphical interface to the Betfair Betting Exchange
# Copyright (C) 2010 Daniel Rodriguez (aka Daniel Rodriksson)
# Copyright (C) 2011 Sensible Odds Ltd.
#
# You can learn more and contact the author at:
#
# http://code.google.com/p/bfplusplus/
#
# Bfplusplus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Bfplusplus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Bfplusplus. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
"""Subclass of ProgressDialog, which is generated by wxFormBuilder."""
import MainGui
import wx
# Implementing ProgressDialog
class ProgressDialog(MainGui.ProgressDialog):
timerInc = 0.100
def __init__(self, parent, timeOut=14.0):
MainGui.ProgressDialog.__init__(self, parent)
self.timeOut = timeOut
self.timerCount = 0
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
if self.CanSetTransparent() == True:
self.SetTransparent(128)
def OnClose(self, event):
event.Veto()
def OnTimer(self, event):
event.Skip()
if self.timerCount >= self.timeOut:
self.Show(False)
return
self.timerCount += self.timerInc
self.m_gaugeLoading.SetValue(self.timerCount/self.timerInc)
def Show(self, p_show):
if p_show == True and self.IsShown() == True:
self.Show(False)
if p_show == False and self.IsShown() == False:
self.Show(True)
if p_show == True:
self.m_gaugeLoading.SetValue(0)
self.m_gaugeLoading.SetRange(self.timeOut / self.timerInc)
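# The gauge range is timeOut / timerInc ticks, so with the defaults
# (14.0 s timeout, 0.1 s tick) the bar advances one step per OnTimer call.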
self.timerCount = 0
self.timer.Start(self.timerInc * 1000)
self.SetRect(self.GetParent().GetScreenRect())
else: # p_show is False
self.timer.Stop()
MainGui.ProgressDialog.Show(self, p_show)
| gpl-3.0 | 9,045,739,586,648,589,000 | 29.104651 | 80 | 0.623793 | false |
open-craft/opencraft | pr_watch/tests/test_api.py | 1 | 11845 | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2019 OpenCraft <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests of the Pull Request Watcher API
"""
# Imports #####################################################################
from unittest.mock import patch
import ddt
from rest_framework import status
from rest_framework.test import APIClient, APIRequestFactory
from instance.models.openedx_instance import OpenEdXInstance
from instance.tests.base import WithUserTestCase
from pr_watch import github
from pr_watch.models import WatchedPullRequest
from pr_watch.tests.factories import make_watched_pr_and_instance, PRFactory
# Tests #######################################################################
@ddt.ddt
@patch(
'instance.models.openedx_instance.OpenEdXInstance._write_metadata_to_consul',
return_value=(1, True)
)
class APITestCase(WithUserTestCase):
"""
Tests of the Pull Request Watcher API
"""
def setUp(self):
super().setUp()
self.api_factory = APIRequestFactory()
self.api_client = APIClient()
def test_get_unauthenticated(self, mock_consul):
"""
GET - Require to be authenticated
"""
forbidden_message = {"detail": "Authentication credentials were not provided."}
response = self.api_client.get('/api/v1/pr_watch/')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, forbidden_message)
watched_pr = make_watched_pr_and_instance()
response = self.api_client.get('/api/v1/pr_watch/{pk}/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, forbidden_message)
@ddt.data('user1', 'user2')
def test_get_permission_denied(self, username, mock_consul):
"""
GET - basic and staff users denied access
"""
forbidden_message = {"detail": "You do not have permission to perform this action."}
self.api_client.login(username=username, password='pass')
response = self.api_client.get('/api/v1/pr_watch/')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, forbidden_message)
watched_pr = make_watched_pr_and_instance()
response = self.api_client.get('/api/v1/pr_watch/{pk}/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, forbidden_message)
@ddt.data('user3', 'user4')
def test_get_authenticated(self, username, mock_consul):
"""
GET - Authenticated - instance manager users (superuser or not) allowed access
"""
self.api_client.login(username=username, password='pass')
response = self.api_client.get('/api/v1/pr_watch/')
self.assertEqual(response.data, [])
# This uses user4's organization. Both user3 and user4 will be able to see it later
watched_pr = make_watched_pr_and_instance(
branch_name='api-test-branch',
username='user4',
organization=self.organization2
)
def check_output(data):
""" Check that the data object passed matches expectations for 'watched_pr' """
data = data.items()
self.assertIn(('id', watched_pr.pk), data)
self.assertIn(('fork_name', 'fork/repo'), data)
self.assertIn(('target_fork_name', 'source/repo'), data)
self.assertIn(('branch_name', 'api-test-branch'), data)
self.assertIn(('github_pr_number', watched_pr.github_pr_number), data)
self.assertIn(('github_pr_url', watched_pr.github_pr_url), data)
self.assertIn(('instance_id', watched_pr.instance.ref.id), data)
response = self.api_client.get('/api/v1/pr_watch/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
check_output(response.data[0])
# And check the details view:
response = self.api_client.get('/api/v1/pr_watch/{pk}/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_200_OK)
check_output(response.data)
@patch('pr_watch.github.get_commit_id_from_ref', return_value=('5' * 40))
@patch('pr_watch.github.get_pr_by_number')
def test_get_filtered_by_organization(self, mock_get_pr_by_number, mock_get_commit_id_from_ref, mock_consul):
"""
GET+POST - A user (instance manager) can only manage PRs from WF which belong to the user's organization.
"""
wpr1 = make_watched_pr_and_instance(username='user1', organization=self.organization)
wpr2 = make_watched_pr_and_instance(username='user4', organization=self.organization2)
self.assertEqual(WatchedPullRequest.objects.count(), 2)
# We'll log in with user4, and we should only see pr2, but not pr1
self.api_client.login(username='user4', password='pass')
# Check the PR list
response = self.api_client.get('/api/v1/pr_watch/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn(('id', wpr1.pk), response.data[0].items())
self.assertIn(('id', wpr2.pk), response.data[0].items())
# Also check the detailed view
response = self.api_client.get('/api/v1/pr_watch/{pk}/'.format(pk=wpr1.pk))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.api_client.get('/api/v1/pr_watch/{pk}/'.format(pk=wpr2.pk))
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Also check update_instance
mock_get_pr_by_number.return_value = PRFactory(number=wpr1.github_pr_number)
response = self.api_client.post('/api/v1/pr_watch/{pk}/update_instance/'.format(pk=wpr1.pk))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
mock_get_pr_by_number.return_value = PRFactory(number=wpr2.github_pr_number)
response = self.api_client.post('/api/v1/pr_watch/{pk}/update_instance/'.format(pk=wpr2.pk))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_no_organization(self, mock_consul):
"""
GET+POST - An instance manager without an organization can't see/update any PR.
"""
self.api_client.login(username='user5', password='pass')
watched_pr = make_watched_pr_and_instance(branch_name='api-test-branch')
response = self.api_client.get('/api/v1/pr_watch/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
response = self.api_client.get('/api/v1/pr_watch/{pk}/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.api_client.post('/api/v1/pr_watch/{pk}/update_instance/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
@patch('pr_watch.github.get_pr_by_number')
@ddt.data('user3', 'user4')
def test_update_instance(self, username, mock_get_pr_by_number, mock_consul):
"""
POST /pr_watch/:id/update_instance/ - Update instance with latest settings from the PR
"""
self.api_client.login(username=username, password='pass')
# Create a WatchedPullRequest, and OpenEdXInstance:
watched_pr = make_watched_pr_and_instance(username='user4', organization=self.organization2)
instance = OpenEdXInstance.objects.get(pk=watched_pr.instance_id)
self.assertIn('fork/master (5555555)', instance.name)
self.assertEqual(instance.edx_platform_commit, '5' * 40)
# Now mock the PR being updated on GitHub
mock_get_pr_by_number.return_value = PRFactory(
number=watched_pr.github_pr_number,
title="Updated Title",
)
with patch('pr_watch.github.get_commit_id_from_ref', return_value=('6' * 40)):
response = self.api_client.post('/api/v1/pr_watch/{pk}/update_instance/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_200_OK)
instance.refresh_from_db()
self.assertEqual(
instance.name,
'PR#{}: Updated Title (edx) - fork/master (6666666)'.format(watched_pr.github_pr_number)
)
self.assertEqual(instance.edx_platform_commit, '6' * 40)
def test_update_unauthenticated(self, mock_consul):
"""
POST /pr_watch/:id/update_instance/ - Denied to anonymous users
"""
forbidden_message = {"detail": "Authentication credentials were not provided."}
# Create a WatchedPullRequest, and OpenEdXInstance:
watched_pr = make_watched_pr_and_instance()
response = self.api_client.post('/api/v1/pr_watch/{pk}/update_instance/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, forbidden_message)
@ddt.data('user1', 'user2')
def test_update_permission_denied(self, username, mock_consul):
"""
POST /pr_watch/:id/update_instance/ - Denied to non instance managers (basic user and staff)
"""
forbidden_message = {"detail": "You do not have permission to perform this action."}
self.api_client.login(username=username, password='pass')
# Create a WatchedPullRequest, and OpenEdXInstance:
watched_pr = make_watched_pr_and_instance()
response = self.api_client.post('/api/v1/pr_watch/{pk}/update_instance/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, forbidden_message)
@patch('pr_watch.github.get_commit_id_from_ref', side_effect=github.ObjectDoesNotExist)
@patch('pr_watch.github.get_pr_by_number')
def test_update_instance_branch_delete(self, mock_get_pr_by_number, mock_get_commit_id_from_ref, mock_consul):
"""
Test what happens when we try to update an instance for a PR whose branch has been
deleted.
Note: Once WatchedPullRequest.update_instance_from_pr() has been refactored so that it
first queries GitHub for PR details (rather than accepting a PR parameter), it can get
the commit ID from the PR details response, rather than using get_branch_tip(), and then
this test won't be necessary since the PR API always contains the commit information
(in ["head"]["sha"]) even if the branch has been deleted.
"""
self.api_client.login(username='user3', password='pass')
watched_pr = make_watched_pr_and_instance()
mock_get_pr_by_number.return_value = PRFactory(number=watched_pr.github_pr_number)
response = self.api_client.post('/api/v1/pr_watch/{pk}/update_instance/'.format(pk=watched_pr.pk))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'error': 'Could not fetch updated details from GitHub.'})
| agpl-3.0 | 5,607,447,087,860,703,000 | 46.003968 | 114 | 0.661376 | false |
cauanicastro/smssurveytools | yegsms/settings.py | 1 | 3508 | """
Django settings for yegsms project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import os.path
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*us=$0&+m!=5lh*!%ihzpi_5)tifeep4+24p6_b)#_!$c@+jhu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_twilio',
'dj_database_url',
'sms',
)
MIDDLEWARE_CLASSES = (
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'yegsms.urls'
WSGI_APPLICATION = 'yegsms.wsgi.application'
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Parse database configuration from $DATABASE_URL
#DATABASES['default'] = dj_database_url.config()
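# dj_database_url.config() would instead build this dict from the DATABASE_URL
# environment variable (e.g. postgres://user:pass@host:5432/dbname); the
# hard-coded settings below bypass that.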
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dbiup6i64tv78k',
'USER': 'uosqriiqcifbbi',
'PASSWORD': 'td-3wwLgTE--lNSkKAfNlTM-Q8',
'HOST': 'ec2-107-21-104-188.compute-1.amazonaws.com',
'PORT': '5432',
}
}
#DATABASES = {'default': dj_database_url.config(default=os.environ.get('DATABASE_URL'))}
#DATABASES['default'] =
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates').replace('\\','/'),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static').replace('\\','/'),
)
LOGIN_REDIRECT_URL='/dashboard/'
TWILIO_ACCOUNT_SID = 'ACaca92adf5b074dad31e78b705e5a1d30'
TWILIO_AUTH_TOKEN = 'ffb68cdb782d127c3958cfe9bac5d03c'
| apache-2.0 | 7,114,435,093,570,781,000 | 26.40625 | 88 | 0.70268 | false |
sdee/yummly-goulash | app/search/search.py | 1 | 1739 | from flask import render_template, request, redirect, url_for
from wtforms import Form, TextField
from app.search.api import YummlyClient
from app.search.api import SearchResults
from flask_bootstrap import Bootstrap
from flask import Flask
import random
app = Flask(__name__)
Bootstrap(app)
app.config['DEBUG'] = True
app.config.from_envvar('GOULASH_SETTINGS')
class RecipeSearchForm(Form):
recipe = TextField('Recipe')
@app.route('/search/<dish>')
def search_by_dish(dish):
yc = YummlyClient(app.config['API_ID'], app.config['API_KEY'])
core_ingreds, matches = yc.find_consensus(dish)
message = str(', '.join(core_ingreds))
photos = random.sample(matches.photos, 9)
return redirect(url_for('results', message=message, num_matches=matches.num_matches, title=dish))
@app.route('/search', methods=['GET', 'POST'])
def search():
form = RecipeSearchForm(request.form)
if request.method == 'POST':
dish = form.recipe.data
return redirect(url_for('search_by_dish', dish=dish))
examples = [('goulash', url_for('search_by_dish', dish='goulash')),
('beef stroganoff', url_for('search_by_dish', dish='beef stroganoff')),
('caprese salad', url_for('search_by_dish', dish='caprese salad')),
('biscuits and gravy', url_for('search_by_dish', dish='biscuits and gravy')),
('peach cobbler', url_for('search_by_dish', dish='peach cobbler'))]
return render_template('search/search.html', form=form, examples=examples)
@app.route('/results')
def results():
message = request.args['message']
num_matches = request.args['num_matches']
title = request.args['title']
return render_template('search/results.html', message=message, title=title, num_matches=num_matches)
if __name__ == "__main__":
app.run()
| mit | -1,652,940,506,979,181,000 | 36.804348 | 101 | 0.707878 | false |
dieterv/etk.docking | docs/reference/source/conf.py | 1 | 7846 | # -*- coding: utf-8 -*-
# vim:sw=4:et:ai
# etk.docking documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 23 17:46:41 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import re
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_version():
file = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'lib', 'etk', 'docking', '__init__.py')
return re.compile(r".*__version__ = '(.*?)'", re.S).match(read(file)).group(1)
def autodoc_skip_member(app, what, name, obj, skip, options):
# Ignore gobject virtual function implementations (signal handlers)
if name.startswith('do_') or '.do_' in name:
return True
def setup(app):
app.connect('autodoc-skip-member', autodoc_skip_member)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../../../lib'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'etk.docking'
copyright = u'2010, etk.docking Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'etkdockingdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'etkdocking.tex', u'etk.docking Documentation',
u'etk.docing Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'etkdocking', u'etk.docking Documentation',
[u'etk.docing Contributors'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| lgpl-3.0 | 2,773,754,930,438,436,400 | 31.556017 | 108 | 0.704308 | false |
jskeet/google-api-dotnet-client | ClientGenerator/src/googleapis/codegen/targets.py | 1 | 6986 | #!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
"""Targets class describes which languages/platforms we support."""
__author__ = '[email protected] (Will Clarkson)'
import logging
import os
from googleapis.codegen.filesys import files
from googleapis.codegen.utilities import json_expander
from googleapis.codegen.utilities import json_with_comments
class Targets(object):
"""Targets maintains the list of possible target options.
Reads targets.json file in local directory. This file is formatted
as:
{
'languages': {
'languageA': {
'surface_option1': {
'path': 'stable',
'description': 'something about language A',
'displayName': 'SurfaceOption1',
},
'surface_option2': {
'path': 'experimental',
'description': 'something about language A',
'displayName': 'SurfaceOption2',
'platforms': ['cmd-line'],
}
},
'languageB': {
...
}, ...
},
'platforms': {
'cmd-line': {
'displayName': 'Pretty Platform Name'
}
}
}
"""
def __init__(self, targets_path=None, template_root=None, targets_dict=None):
"""Constructor.
Loads targets file.
Args:
targets_path: (str) Path to targets file. Defaults to './targets.json'
template_root: (str) Path to template root. Defaults to '.'
targets_dict: (dict) Initial data, if not supplied from a file.
Raises:
ValueError: if the targets file does not contain the required sections.
"""
self.template_root = template_root or Targets._default_template_root
self.targets_path = targets_path or os.path.join(self.template_root,
'targets.json')
if targets_dict:
self._targets_dict = targets_dict
else:
self._targets_dict = json_with_comments.Loads(
files.GetFileContents(self.targets_path))
# Do some basic validation that this has the required fields
if 'languages' not in self._targets_dict:
raise ValueError('languages not in targets.json')
def Dict(self):
"""The targets.json file as a dictionary."""
return self._targets_dict
def VariationsForLanguage(self, language):
language_def = self._targets_dict['languages'].get(language)
if not language_def:
return None
return Variations(self, language, language_def['variations'])
def GetLanguage(self, language):
return self._targets_dict['languages'][language]
def Languages(self):
return self._targets_dict['languages']
def Platforms(self):
return self._targets_dict.get('platforms', {})
@staticmethod
def SetDefaultTemplateRoot(path):
"""Sets a new default full path to the templates directory.
Args:
path: (str) full path to templates directory.
"""
# This is not a classmethod because we don't want subclasses
# to shadow this value.
logging.info('setting default template root to %s', path)
Targets._default_template_root = path
@staticmethod
def GetDefaultTemplateRoot():
return Targets._default_template_root
# Set the initial template root.
_default_template_root = os.path.join(os.path.dirname(__file__),
'languages')
# Whether to use variation release versions when calculating template paths.
use_versioned_paths = False
@staticmethod
def SetUseVersionedPaths(use_versioned_paths):
"""Sets whether versions are used in the template path."""
# This is not a classmethod because we don't want subclasses
# to shadow this value.
Targets.use_versioned_paths = use_versioned_paths
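# Minimal usage sketch (file paths here are hypothetical, not shipped with this
# module):
#
#   targets = Targets(targets_path='/tmp/targets.json',
#                     template_root='/tmp/templates')
#   variations = targets.VariationsForLanguage('java')
#   if variations and variations.IsValid('stable'):
#       features = variations.GetFeatures('stable')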
class Variations(dict):
"""A set of variations available for a particular language."""
def __init__(self, targets, language, variations_dict):
super(Variations, self).__init__(variations_dict)
self._targets = targets
self._language = language
def IsValid(self, variation):
"""Test is a variation exists."""
return variation in self
def _RelativeTemplateDir(self, variation):
"""Returns the path to template dir for the selected variation.
By default, the path is the same as the variation name. It can be
overridden in two ways, of descending precedence:
1. by the 'releaseVersion' element, if use_versioned_paths is set.
2. with an explicit 'path' statement.
Args:
variation: (str) A target variation name.
Returns:
(str) Relative path to template directory.
"""
if self._targets.use_versioned_paths:
path = self[variation].get('releaseVersion') or variation
else:
path = None
if not path:
path = self.get(variation, {}).get('path') or variation
return os.path.join(self._language, path)
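# Illustrative outcomes for language='java', variation='stable': with
# use_versioned_paths set and a releaseVersion of '1.2' the result is
# 'java/1.2'; with an explicit path 'experimental' it is 'java/experimental';
# with neither it falls back to 'java/stable'.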
def AbsoluteTemplateDir(self, variation):
"""Returns the path to template dir for the selected variation.
Args:
variation: (str) A target variation name.
Returns:
(str) Absolute path to template directory.
"""
return os.path.join(self._targets.template_root,
self._RelativeTemplateDir(variation))
def GetFeaturesForReleaseVersion(self, release_version):
for name in self:
features = self.GetFeatures(name)
if release_version == features.get('releaseVersion'):
return features
return None
def GetFeatures(self, variation):
"""Returns the features dictionary for a specific variation.
This is the basic dictionary information plus any specific overrides in
the per-template-tree features.json file.
Args:
variation: (str) A target variation name.
Returns:
(Features) features dictionary
"""
if not variation:
return None
template_dir = self.AbsoluteTemplateDir(variation)
features = Features(template_dir, self.get(variation), variation)
json_path = os.path.join(template_dir, 'features.json')
try:
features_json = files.GetFileContents(json_path)
except files.FileDoesNotExist:
# for backwards compatibility, we forgive this.
# TODO(user): be stricter about this and
# fix/remove any tests that fail as a result.
return features
features.update(json_expander.ExpandJsonTemplate(
json_with_comments.Loads(features_json)))
# If not specified, the releaseVersion matches the variation
if not features.get('releaseVersion'):
features['releaseVersion'] = variation
return features
class Features(dict):
"""A dictionary describing the features of a particular API variation."""
# TODO(user): Do we need initial_content? The only thing we see in it is
# path, which should be set explicitly to the dirname of the real file path.
def __init__(self, template_dir, initial_content=None, name=None):
super(Features, self).__init__(initial_content or {})
self.name = name
self.template_dir = template_dir
if 'path' not in self:
self['path'] = os.path.basename(template_dir)
| apache-2.0 | 420,110,255,415,303,740 | 30.899543 | 79 | 0.6712 | false |
codilime/cloudify-manager | tests/testenv/processes/elastic.py | 1 | 6757 | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import logging
import re
import shlex
import subprocess
import time
import sys
import os
import elasticsearch
from cloudify.utils import setup_logger
from testenv.constants import STORAGE_INDEX_NAME
from testenv.constants import LOG_INDICES_PREFIX
logger = setup_logger('elasticsearch_process')
class ElasticSearchProcess(object):
"""
Manages an ElasticSearch server process lifecycle.
"""
def __init__(self):
self._pid = None
self._process = None
setup_logger('elasticsearch',
logging.INFO)
setup_logger('elasticsearch.trace',
logging.INFO)
@staticmethod
def _verify_service_responsiveness(timeout=120):
import urllib2
service_url = "http://localhost:9200"
up = False
deadline = time.time() + timeout
res = None
while time.time() < deadline:
try:
res = urllib2.urlopen(service_url)
up = res.code == 200
break
except BaseException as e:
if e.message:
logger.warning(e.message)
pass
time.sleep(0.5)
if not up:
raise RuntimeError("Elasticsearch service is not responding @ {"
"0} (response: {1})".format(service_url, res))
def _verify_service_started(self, timeout=60):
deadline = time.time() + timeout
while time.time() < deadline:
self._pid = self._get_service_pid()
if self._pid is not None:
break
time.sleep(0.5)
if self._pid is None:
raise RuntimeError("Failed to start elasticsearch service within "
"a {0} seconds timeout".format(timeout))
def _verify_service_ended(self, timeout=10):
pid = self._pid
deadline = time.time() + timeout
while time.time() < deadline:
pid = self._get_service_pid()
if pid is None:
break
time.sleep(0.5)
if pid is not None:
raise RuntimeError("Failed to stop elasticsearch service within "
"a {0} seconds timeout".format(timeout))
@staticmethod
def _get_service_pid():
from subprocess import CalledProcessError
pattern = "\w*\s*(\d*).*"
try:
output = subprocess.check_output(
"ps -ef | grep elasticsearch | grep -v grep", shell=True)
match = re.match(pattern, output)
if match:
return int(match.group(1))
except CalledProcessError:
pass
return None
def start(self):
command = 'elasticsearch'
logger.info('Starting elasticsearch service with command {0}'
.format(command))
self._process = subprocess.Popen(shlex.split(command))
self._verify_service_started()
self._verify_service_responsiveness()
logger.info('elasticsearch service started [pid=%s]', self._pid)
self.reset_data()
def close(self):
if self._pid:
logger.info('Shutting down elasticsearch service [pid=%s]',
self._pid)
os.system('kill {0}'.format(self._pid))
self._verify_service_ended()
def reset_data(self):
self._remove_index_if_exists()
self._create_schema()
@staticmethod
def remove_log_indices():
es = elasticsearch.Elasticsearch()
from elasticsearch.client import IndicesClient
es_index = IndicesClient(es)
log_index_pattern = '{0}*'.format(LOG_INDICES_PREFIX)
if es_index.exists(log_index_pattern):
logger.info(
"Elasticsearch indices '{0}' already exist and "
"will be deleted".format(log_index_pattern))
try:
es_index.delete(log_index_pattern)
logger.info('Verifying Elasticsearch index was deleted...')
deadline = time.time() + 45
while es_index.exists(log_index_pattern):
if time.time() > deadline:
raise RuntimeError(
'Elasticsearch index was not deleted after '
'30 seconds')
time.sleep(0.5)
except BaseException as e:
logger.warn('Ignoring caught exception on Elasticsearch delete'
' index - {0}: {1}'.format(e.__class__, e.message))
@staticmethod
def _remove_index_if_exists():
es = elasticsearch.Elasticsearch()
from elasticsearch.client import IndicesClient
es_index = IndicesClient(es)
if es_index.exists(STORAGE_INDEX_NAME):
logger.info(
"Elasticsearch index '{0}' already exists and "
"will be deleted".format(STORAGE_INDEX_NAME))
try:
es_index.delete(STORAGE_INDEX_NAME)
logger.info('Verifying Elasticsearch index was deleted...')
deadline = time.time() + 45
while es_index.exists(STORAGE_INDEX_NAME):
if time.time() > deadline:
raise RuntimeError(
'Elasticsearch index was not deleted after '
'30 seconds')
time.sleep(0.5)
except BaseException as e:
logger.warn('Ignoring caught exception on Elasticsearch delete'
' index - {0}: {1}'.format(e.__class__, e.message))
@staticmethod
def _create_schema():
from testenv import es_schema_creator
creator_script_path = es_schema_creator.__file__
cmd = '{0} {1}'.format(sys.executable, creator_script_path)
status = os.system(cmd)
if status != 0:
raise RuntimeError(
'Elasticsearch create schema exited with {0}'.format(status))
logger.info("Elasticsearch schema created successfully")
| apache-2.0 | 3,925,448,571,460,775,000 | 36.331492 | 79 | 0.566672 | false |
gurneyalex/odoo | addons/crm/tests/test_crm_pls.py | 4 | 5565 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from odoo import fields, tools
from odoo.tests.common import TransactionCase
class TestCRMPLS(TransactionCase):
def _get_lead_values(self, team_id, name_suffix, country_id, state_id, email_state, phone_state, source_id, stage_id):
return {
'name': 'lead_' + name_suffix,
'type': 'opportunity',
'state_id': state_id,
'email_state': email_state,
'phone_state': phone_state,
'source_id': source_id,
'stage_id': stage_id,
'country_id': country_id,
'team_id': team_id
}
def test_predictive_lead_scoring(self):
""" We test here computation of lead probability based on PLS Bayes.
We will use 3 different values for each possible variable:
country_id : 1,2,3
state_id: 1,2,3
email_state: correct, incorrect, None
phone_state: correct, incorrect, None
source_id: 1,2,3
stage_id: 1,2,3 + the won stage
And we will compute all of this for 2 different team_ids.
Note: We assume here that the original Bayes computation is correct,
as we don't compute the probabilities manually."""
Lead = self.env['crm.lead']
state_values = ['correct', 'incorrect', None]
source_ids = self.env['utm.source'].search([], limit=3).ids
state_ids = self.env['res.country.state'].search([], limit=3).ids
country_ids = self.env['res.country'].search([], limit=3).ids
stage_ids = self.env['crm.stage'].search([], limit=3).ids
team_ids = self.env['crm.team'].create([{'name': 'Team Test 1'}, {'name': 'Team Test 2'}]).ids
# create bunch of lost and won crm_lead
leads_to_create = []
# for team 1
for i in range(3):
leads_to_create.append(self._get_lead_values(team_ids[0], 'team_1_%s' % str(i), country_ids[i], state_ids[i], state_values[i], state_values[i], source_ids[i], stage_ids[i]))
leads_to_create.append(
self._get_lead_values(team_ids[0], 'team_1_%s' % str(3), country_ids[0], state_ids[1], state_values[2], state_values[0], source_ids[2], stage_ids[1]))
# for team 2
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(0), country_ids[0], state_ids[1], state_values[2], state_values[0], source_ids[1], stage_ids[2]))
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(1), country_ids[0], state_ids[1], state_values[0], state_values[1], source_ids[2], stage_ids[1]))
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(2), country_ids[0], state_ids[2], state_values[0], state_values[1], source_ids[2], stage_ids[0]))
leads_to_create.append(
self._get_lead_values(team_ids[1], 'team_2_%s' % str(3), country_ids[0], state_ids[1], state_values[2], state_values[0], source_ids[2], stage_ids[1]))
leads = Lead.create(leads_to_create)
# Set the PLS config
self.env['ir.config_parameter'].sudo().set_param("crm.pls_start_date", "2000-01-01")
self.env['ir.config_parameter'].sudo().set_param("crm.pls_fields", "country_id,state_id,email_state,phone_state,source_id")
# set leads as won and lost
# for Team 1
leads[0].action_set_lost()
leads[1].action_set_lost()
leads[2].action_set_won()
# for Team 2
leads[4].action_set_lost()
leads[5].action_set_lost()
leads[6].action_set_won()
# rebuild frequencies table and recompute automated_probability for all leads.
Lead._cron_update_automated_probabilities()
# As the cron is computing and writing in SQL queries, we need to invalidate the cache
leads.invalidate_cache()
self.assertEquals(tools.float_compare(leads[3].automated_probability, 33.49, 2), 0)
self.assertEquals(tools.float_compare(leads[7].automated_probability, 7.74, 2), 0)
def test_settings_pls_start_date(self):
# We test here that settings never crash due to ill-configured config param 'crm.pls_start_date'
set_param = self.env['ir.config_parameter'].sudo().set_param
str_date_8_days_ago = fields.Date.to_string(fields.Date.today() - timedelta(days=8))
resConfig = self.env['res.config.settings']
set_param("crm.pls_start_date", "2021-10-10")
res_config_new = resConfig.new()
self.assertEqual(fields.Date.to_string(res_config_new.predictive_lead_scoring_start_date),
"2021-10-10", "If config param is a valid date, date in settings it should match with config param")
set_param("crm.pls_start_date", "")
res_config_new = resConfig.new()
self.assertEqual(fields.Date.to_string(res_config_new.predictive_lead_scoring_start_date),
str_date_8_days_ago, "If config param is empty, date in settings should be set to 8 days before today")
set_param("crm.pls_start_date", "One does not simply walk into system parameters to corrupt them")
res_config_new = resConfig.new()
self.assertEqual(fields.Date.to_string(res_config_new.predictive_lead_scoring_start_date),
str_date_8_days_ago, "If config param is not a valid date, date in settings should be set to 8 days before today")
| agpl-3.0 | -9,076,126,193,186,145,000 | 52 | 185 | 0.616532 | false |
shubhdev/edx-platform | lms/envs/aws.py | 1 | 29113 | """
This is the default template for our main set of AWS servers. This does NOT
cover the content machines, which use content.py
Common traits:
* Use memcached, and cache-backed sessions
* Use a MySQL 5.1 database
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import json
from .common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
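# For example, SERVICE_VARIANT='lms' yields CONFIG_PREFIX='lms.', so the file
# read below is <CONFIG_ROOT>/lms.env.json; with no variant it is env.json.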
################################ ALWAYS THE SAME ##############################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
HIGH_MEM_QUEUE = 'edx.{0}core.high_mem'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
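# For example, with QUEUE_VARIANT='lms.' the names above resolve to
# 'edx.lms.core', 'edx.lms.core.high', 'edx.lms.core.default',
# 'edx.lms.core.low' and 'edx.lms.core.high_mem'.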
# If we're a worker on the high_mem queue, set ourselves to die after processing
# one request to avoid having memory leaks take down the worker server. This env
# var is set in /etc/init/edx-workers.conf -- this should probably be replaced
# with some celery API call to see what queue we started listening to, but I
# don't know what that call is or if it's active at this point in the code.
if os.environ.get('QUEUE') == 'high_mem':
CELERYD_MAX_TASKS_PER_CHILD = 1
########################## NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
# MEDIA_ROOT specifies the directory where user-uploaded files are stored.
MEDIA_ROOT = ENV_TOKENS.get('MEDIA_ROOT', MEDIA_ROOT)
MEDIA_URL = ENV_TOKENS.get('MEDIA_URL', MEDIA_URL)
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', PLATFORM_NAME)
# For displaying on the receipt. At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default
PLATFORM_TWITTER_ACCOUNT = ENV_TOKENS.get('PLATFORM_TWITTER_ACCOUNT', PLATFORM_TWITTER_ACCOUNT)
PLATFORM_FACEBOOK_ACCOUNT = ENV_TOKENS.get('PLATFORM_FACEBOOK_ACCOUNT', PLATFORM_FACEBOOK_ACCOUNT)
# Social media links for the page footer
SOCIAL_MEDIA_FOOTER_URLS = ENV_TOKENS.get('SOCIAL_MEDIA_FOOTER_URLS', SOCIAL_MEDIA_FOOTER_URLS)
CC_MERCHANT_NAME = ENV_TOKENS.get('CC_MERCHANT_NAME', PLATFORM_NAME)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', 'localhost') # django default is localhost
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', 25) # django default is 25
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', False) # django default is False
SITE_NAME = ENV_TOKENS['SITE_NAME']
HTTPS = ENV_TOKENS.get('HTTPS', HTTPS)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
REGISTRATION_EXTRA_FIELDS = ENV_TOKENS.get('REGISTRATION_EXTRA_FIELDS', REGISTRATION_EXTRA_FIELDS)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
CMS_BASE = ENV_TOKENS.get('CMS_BASE', 'studio.edx.org')
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
BOOK_URL = ENV_TOKENS['BOOK_URL']
MEDIA_URL = ENV_TOKENS['MEDIA_URL']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
# Email overrides
DEFAULT_FROM_EMAIL = ENV_TOKENS.get('DEFAULT_FROM_EMAIL', DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get('DEFAULT_FEEDBACK_EMAIL', DEFAULT_FEEDBACK_EMAIL)
ADMINS = ENV_TOKENS.get('ADMINS', ADMINS)
SERVER_EMAIL = ENV_TOKENS.get('SERVER_EMAIL', SERVER_EMAIL)
TECH_SUPPORT_EMAIL = ENV_TOKENS.get('TECH_SUPPORT_EMAIL', TECH_SUPPORT_EMAIL)
CONTACT_EMAIL = ENV_TOKENS.get('CONTACT_EMAIL', CONTACT_EMAIL)
BUGS_EMAIL = ENV_TOKENS.get('BUGS_EMAIL', BUGS_EMAIL)
PAYMENT_SUPPORT_EMAIL = ENV_TOKENS.get('PAYMENT_SUPPORT_EMAIL', PAYMENT_SUPPORT_EMAIL)
FINANCE_EMAIL = ENV_TOKENS.get('FINANCE_EMAIL', FINANCE_EMAIL)
UNIVERSITY_EMAIL = ENV_TOKENS.get('UNIVERSITY_EMAIL', UNIVERSITY_EMAIL)
PRESS_EMAIL = ENV_TOKENS.get('PRESS_EMAIL', PRESS_EMAIL)
# Currency
PAID_COURSE_REGISTRATION_CURRENCY = ENV_TOKENS.get('PAID_COURSE_REGISTRATION_CURRENCY',
PAID_COURSE_REGISTRATION_CURRENCY)
# Payment Report Settings
PAYMENT_REPORT_GENERATOR_GROUP = ENV_TOKENS.get('PAYMENT_REPORT_GENERATOR_GROUP', PAYMENT_REPORT_GENERATOR_GROUP)
# Bulk Email overrides
BULK_EMAIL_DEFAULT_FROM_EMAIL = ENV_TOKENS.get('BULK_EMAIL_DEFAULT_FROM_EMAIL', BULK_EMAIL_DEFAULT_FROM_EMAIL)
BULK_EMAIL_EMAILS_PER_TASK = ENV_TOKENS.get('BULK_EMAIL_EMAILS_PER_TASK', BULK_EMAIL_EMAILS_PER_TASK)
BULK_EMAIL_DEFAULT_RETRY_DELAY = ENV_TOKENS.get('BULK_EMAIL_DEFAULT_RETRY_DELAY', BULK_EMAIL_DEFAULT_RETRY_DELAY)
BULK_EMAIL_MAX_RETRIES = ENV_TOKENS.get('BULK_EMAIL_MAX_RETRIES', BULK_EMAIL_MAX_RETRIES)
BULK_EMAIL_INFINITE_RETRY_CAP = ENV_TOKENS.get('BULK_EMAIL_INFINITE_RETRY_CAP', BULK_EMAIL_INFINITE_RETRY_CAP)
BULK_EMAIL_LOG_SENT_EMAILS = ENV_TOKENS.get('BULK_EMAIL_LOG_SENT_EMAILS', BULK_EMAIL_LOG_SENT_EMAILS)
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = ENV_TOKENS.get('BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS', BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
# We have to reset the value here, since we have changed the value of the queue name.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# We can run smaller jobs on the low priority queue. See note above for why
# we have to reset the value here.
BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE
# Theme overrides
THEME_NAME = ENV_TOKENS.get('THEME_NAME', None)
# Marketing link overrides
MKTG_URL_LINK_MAP.update(ENV_TOKENS.get('MKTG_URL_LINK_MAP', {}))
# Mobile store URL overrides
MOBILE_STORE_URLS = ENV_TOKENS.get('MOBILE_STORE_URLS', MOBILE_STORE_URLS)
# Timezone overrides
TIME_ZONE = ENV_TOKENS.get('TIME_ZONE', TIME_ZONE)
# Translation overrides
LANGUAGES = ENV_TOKENS.get('LANGUAGES', LANGUAGES)
LANGUAGE_DICT = dict(LANGUAGES)
LANGUAGE_CODE = ENV_TOKENS.get('LANGUAGE_CODE', LANGUAGE_CODE)
USE_I18N = ENV_TOKENS.get('USE_I18N', USE_I18N)
# Additional installed apps
for app in ENV_TOKENS.get('ADDL_INSTALLED_APPS', []):
INSTALLED_APPS += (app,)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', ENV_TOKENS.get('MITX_FEATURES', {}))
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
WIKI_ENABLED = ENV_TOKENS.get('WIKI_ENABLED', WIKI_ENABLED)
local_loglevel = ENV_TOKENS.get('LOCAL_LOGLEVEL', 'INFO')
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
local_loglevel=local_loglevel,
debug=False,
service_variant=SERVICE_VARIANT)
COURSE_LISTINGS = ENV_TOKENS.get('COURSE_LISTINGS', {})
SUBDOMAIN_BRANDING = ENV_TOKENS.get('SUBDOMAIN_BRANDING', {})
VIRTUAL_UNIVERSITIES = ENV_TOKENS.get('VIRTUAL_UNIVERSITIES', [])
META_UNIVERSITIES = ENV_TOKENS.get('META_UNIVERSITIES', {})
COMMENTS_SERVICE_URL = ENV_TOKENS.get("COMMENTS_SERVICE_URL", '')
COMMENTS_SERVICE_KEY = ENV_TOKENS.get("COMMENTS_SERVICE_KEY", '')
CERT_QUEUE = ENV_TOKENS.get("CERT_QUEUE", 'test-pull')
ZENDESK_URL = ENV_TOKENS.get("ZENDESK_URL")
FEEDBACK_SUBMISSION_EMAIL = ENV_TOKENS.get("FEEDBACK_SUBMISSION_EMAIL")
MKTG_URLS = ENV_TOKENS.get('MKTG_URLS', MKTG_URLS)
# Badgr API
BADGR_API_TOKEN = ENV_TOKENS.get('BADGR_API_TOKEN', BADGR_API_TOKEN)
BADGR_BASE_URL = ENV_TOKENS.get('BADGR_BASE_URL', BADGR_BASE_URL)
BADGR_ISSUER_SLUG = ENV_TOKENS.get('BADGR_ISSUER_SLUG', BADGR_ISSUER_SLUG)
# git repo loading environment
GIT_REPO_DIR = ENV_TOKENS.get('GIT_REPO_DIR', '/edx/var/edxapp/course_repos')
GIT_IMPORT_STATIC = ENV_TOKENS.get('GIT_IMPORT_STATIC', True)
for name, value in ENV_TOKENS.get("CODE_JAIL", {}).items():
oldvalue = CODE_JAIL.get(name)
if isinstance(oldvalue, dict):
for subname, subvalue in value.items():
oldvalue[subname] = subvalue
else:
CODE_JAIL[name] = value
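# Illustrative only: the loop above merges one level deep, so an override such as
#     {"limits": {"REALTIME": 5}, "python_bin": "/edx/app/sandbox/python"}
# updates CODE_JAIL["limits"]["REALTIME"] in place while replacing
# CODE_JAIL["python_bin"] wholesale (both key names are hypothetical here).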
COURSES_WITH_UNSAFE_CODE = ENV_TOKENS.get("COURSES_WITH_UNSAFE_CODE", [])
ASSET_IGNORE_REGEX = ENV_TOKENS.get('ASSET_IGNORE_REGEX', ASSET_IGNORE_REGEX)
# Event Tracking
if "TRACKING_IGNORE_URL_PATTERNS" in ENV_TOKENS:
TRACKING_IGNORE_URL_PATTERNS = ENV_TOKENS.get("TRACKING_IGNORE_URL_PATTERNS")
# SSL external authentication settings
SSL_AUTH_EMAIL_DOMAIN = ENV_TOKENS.get("SSL_AUTH_EMAIL_DOMAIN", "MIT.EDU")
SSL_AUTH_DN_FORMAT_STRING = ENV_TOKENS.get("SSL_AUTH_DN_FORMAT_STRING",
"/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}")
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = ENV_TOKENS.get("CAS_EXTRA_LOGIN_PARAMS", None)
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = ENV_TOKENS.get("CAS_SERVER_URL", None)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
CAS_ATTRIBUTE_CALLBACK = ENV_TOKENS.get('CAS_ATTRIBUTE_CALLBACK', None)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
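    # Illustrative shape only -- both names below are hypothetical:
    #     CAS_ATTRIBUTE_CALLBACK = {"module": "openedx_cas_hooks", "function": "populate_user"}
    # The callable is then resolved with importlib at startup, as above.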
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = ENV_TOKENS.get('VIDEO_CDN_URL', {})
# Branded footer
FOOTER_OPENEDX_URL = ENV_TOKENS.get('FOOTER_OPENEDX_URL', FOOTER_OPENEDX_URL)
FOOTER_OPENEDX_LOGO_IMAGE = ENV_TOKENS.get('FOOTER_OPENEDX_LOGO_IMAGE', FOOTER_OPENEDX_LOGO_IMAGE)
FOOTER_ORGANIZATION_IMAGE = ENV_TOKENS.get('FOOTER_ORGANIZATION_IMAGE', FOOTER_ORGANIZATION_IMAGE)
FOOTER_CACHE_TIMEOUT = ENV_TOKENS.get('FOOTER_CACHE_TIMEOUT', FOOTER_CACHE_TIMEOUT)
FOOTER_BROWSER_CACHE_MAX_AGE = ENV_TOKENS.get('FOOTER_BROWSER_CACHE_MAX_AGE', FOOTER_BROWSER_CACHE_MAX_AGE)
############# CORS headers for cross-domain requests #################
if FEATURES.get('ENABLE_CORS_HEADERS') or FEATURES.get('ENABLE_CROSS_DOMAIN_CSRF_COOKIE'):
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ENV_TOKENS.get('CORS_ORIGIN_WHITELIST', ())
CORS_ORIGIN_ALLOW_ALL = ENV_TOKENS.get('CORS_ORIGIN_ALLOW_ALL', False)
CORS_ALLOW_INSECURE = ENV_TOKENS.get('CORS_ALLOW_INSECURE', False)
# If setting a cross-domain cookie, it's really important to choose
# a name for the cookie that is DIFFERENT than the cookies used
# by each subdomain. For example, suppose the applications
# at these subdomains are configured to use the following cookie names:
#
# 1) foo.example.com --> "csrftoken"
# 2) baz.example.com --> "csrftoken"
# 3) bar.example.com --> "csrftoken"
#
# For the cross-domain version of the CSRF cookie, you need to choose
# a name DIFFERENT than "csrftoken"; otherwise, the new token configured
# for ".example.com" could conflict with the other cookies,
# non-deterministically causing 403 responses.
#
# Because of the way Django stores cookies, the cookie name MUST
    # be a `str`, not unicode. Otherwise `TypeError`s will be raised
# when Django tries to call the unicode `translate()` method with the wrong
# number of parameters.
CROSS_DOMAIN_CSRF_COOKIE_NAME = str(ENV_TOKENS.get('CROSS_DOMAIN_CSRF_COOKIE_NAME'))
# When setting the domain for the "cross-domain" version of the CSRF
# cookie, you should choose something like: ".example.com"
# (note the leading dot), where both the referer and the host
# are subdomains of "example.com".
#
# Browser security rules require that
# the cookie domain matches the domain of the server; otherwise
# the cookie won't get set. And once the cookie gets set, the client
# needs to be on a domain that matches the cookie domain, otherwise
# the client won't be able to read the cookie.
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = ENV_TOKENS.get('CROSS_DOMAIN_CSRF_COOKIE_DOMAIN')
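    # Hypothetical values for the two settings described above (not a real deployment):
    #     CROSS_DOMAIN_CSRF_COOKIE_NAME = "prod-edx-csrftoken"
    #     CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = ".example.com"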
# Field overrides. To use the IDDE feature, add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider'.
FIELD_OVERRIDE_PROVIDERS = tuple(ENV_TOKENS.get('FIELD_OVERRIDE_PROVIDERS', []))
############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
############### XBlock filesystem field config ##########
if 'DJFS' in AUTH_TOKENS and AUTH_TOKENS['DJFS'] is not None:
DJFS = AUTH_TOKENS['DJFS']
############### Module Store Items ##########
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = ENV_TOKENS.get('HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', {})
############### Mixed Related(Secure/Not-Secure) Items ##########
# If Segment.io key specified, load it and enable Segment.io if the feature flag is set
SEGMENT_IO_LMS_KEY = AUTH_TOKENS.get('SEGMENT_IO_LMS_KEY')
if SEGMENT_IO_LMS_KEY:
FEATURES['SEGMENT_IO_LMS'] = ENV_TOKENS.get('SEGMENT_IO_LMS', False)
CC_PROCESSOR_NAME = AUTH_TOKENS.get('CC_PROCESSOR_NAME', CC_PROCESSOR_NAME)
CC_PROCESSOR = AUTH_TOKENS.get('CC_PROCESSOR', CC_PROCESSOR)
SECRET_KEY = AUTH_TOKENS['SECRET_KEY']
AWS_ACCESS_KEY_ID = AUTH_TOKENS["AWS_ACCESS_KEY_ID"]
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS["AWS_SECRET_ACCESS_KEY"]
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
AWS_STORAGE_BUCKET_NAME = AUTH_TOKENS.get('AWS_STORAGE_BUCKET_NAME', 'edxuploads')
if AUTH_TOKENS.get('DEFAULT_FILE_STORAGE'):
DEFAULT_FILE_STORAGE = AUTH_TOKENS.get('DEFAULT_FILE_STORAGE')
elif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
else:
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Specific setting for the File Upload Service to store media in a bucket.
FILE_UPLOAD_STORAGE_BUCKET_NAME = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_BUCKET_NAME', FILE_UPLOAD_STORAGE_BUCKET_NAME)
FILE_UPLOAD_STORAGE_PREFIX = ENV_TOKENS.get('FILE_UPLOAD_STORAGE_PREFIX', FILE_UPLOAD_STORAGE_PREFIX)
# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
DATABASES = AUTH_TOKENS['DATABASES']
XQUEUE_INTERFACE = AUTH_TOKENS['XQUEUE_INTERFACE']
# Get the MODULESTORE from auth.json, but if it doesn't exist,
# use the one from common.py
MODULESTORE = convert_module_store_setting_if_needed(AUTH_TOKENS.get('MODULESTORE', MODULESTORE))
CONTENTSTORE = AUTH_TOKENS.get('CONTENTSTORE', CONTENTSTORE)
DOC_STORE_CONFIG = AUTH_TOKENS.get('DOC_STORE_CONFIG', DOC_STORE_CONFIG)
MONGODB_LOG = AUTH_TOKENS.get('MONGODB_LOG', {})
OPEN_ENDED_GRADING_INTERFACE = AUTH_TOKENS.get('OPEN_ENDED_GRADING_INTERFACE',
OPEN_ENDED_GRADING_INTERFACE)
EMAIL_HOST_USER = AUTH_TOKENS.get('EMAIL_HOST_USER', '') # django default is ''
EMAIL_HOST_PASSWORD = AUTH_TOKENS.get('EMAIL_HOST_PASSWORD', '') # django default is ''
# Datadog for events!
DATADOG = AUTH_TOKENS.get("DATADOG", {})
DATADOG.update(ENV_TOKENS.get("DATADOG", {}))
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
# Analytics dashboard server
ANALYTICS_SERVER_URL = ENV_TOKENS.get("ANALYTICS_SERVER_URL")
ANALYTICS_API_KEY = AUTH_TOKENS.get("ANALYTICS_API_KEY", "")
# Analytics data source
ANALYTICS_DATA_URL = ENV_TOKENS.get("ANALYTICS_DATA_URL", ANALYTICS_DATA_URL)
ANALYTICS_DATA_TOKEN = AUTH_TOKENS.get("ANALYTICS_DATA_TOKEN", ANALYTICS_DATA_TOKEN)
# Analytics Dashboard
ANALYTICS_DASHBOARD_URL = ENV_TOKENS.get("ANALYTICS_DASHBOARD_URL", ANALYTICS_DASHBOARD_URL)
ANALYTICS_DASHBOARD_NAME = ENV_TOKENS.get("ANALYTICS_DASHBOARD_NAME", PLATFORM_NAME + " Insights")
# Zendesk
ZENDESK_USER = AUTH_TOKENS.get("ZENDESK_USER")
ZENDESK_API_KEY = AUTH_TOKENS.get("ZENDESK_API_KEY")
# API Key for inbound requests from Notifier service
EDX_API_KEY = AUTH_TOKENS.get("EDX_API_KEY")
# Celery Broker
CELERY_BROKER_TRANSPORT = ENV_TOKENS.get("CELERY_BROKER_TRANSPORT", "")
CELERY_BROKER_HOSTNAME = ENV_TOKENS.get("CELERY_BROKER_HOSTNAME", "")
CELERY_BROKER_VHOST = ENV_TOKENS.get("CELERY_BROKER_VHOST", "")
CELERY_BROKER_USER = AUTH_TOKENS.get("CELERY_BROKER_USER", "")
CELERY_BROKER_PASSWORD = AUTH_TOKENS.get("CELERY_BROKER_PASSWORD", "")
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
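# For illustration only: with hypothetical tokens transport="amqp", user="celery",
# password="secret", hostname="rabbit.internal" and vhost="lms", the format string
# above yields BROKER_URL = "amqp://celery:secret@rabbit.internal/lms".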
# upload limits
STUDENT_FILEUPLOAD_MAX_SIZE = ENV_TOKENS.get("STUDENT_FILEUPLOAD_MAX_SIZE", STUDENT_FILEUPLOAD_MAX_SIZE)
# Event tracking
TRACKING_BACKENDS.update(AUTH_TOKENS.get("TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update(AUTH_TOKENS.get("EVENT_TRACKING_BACKENDS", {}))
EVENT_TRACKING_BACKENDS['segmentio']['OPTIONS']['processors'][0]['OPTIONS']['whitelist'].extend(
AUTH_TOKENS.get("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", []))
TRACKING_SEGMENTIO_WEBHOOK_SECRET = AUTH_TOKENS.get(
"TRACKING_SEGMENTIO_WEBHOOK_SECRET",
TRACKING_SEGMENTIO_WEBHOOK_SECRET
)
TRACKING_SEGMENTIO_ALLOWED_TYPES = ENV_TOKENS.get("TRACKING_SEGMENTIO_ALLOWED_TYPES", TRACKING_SEGMENTIO_ALLOWED_TYPES)
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = ENV_TOKENS.get(
"TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES",
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES
)
TRACKING_SEGMENTIO_SOURCE_MAP = ENV_TOKENS.get("TRACKING_SEGMENTIO_SOURCE_MAP", TRACKING_SEGMENTIO_SOURCE_MAP)
# Student identity verification settings
VERIFY_STUDENT = AUTH_TOKENS.get("VERIFY_STUDENT", VERIFY_STUDENT)
# Grades download
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = ENV_TOKENS.get("GRADES_DOWNLOAD", GRADES_DOWNLOAD)
# financial reports
FINANCIAL_REPORTS = ENV_TOKENS.get("FINANCIAL_REPORTS", FINANCIAL_REPORTS)
##### ORA2 ######
# Prefix for uploads of example-based assessment AI classifiers
# This can be used to separate uploads for different environments
# within the same S3 bucket.
ORA2_FILE_PREFIX = ENV_TOKENS.get("ORA2_FILE_PREFIX", ORA2_FILE_PREFIX)
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", 5)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = ENV_TOKENS.get("MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", 15 * 60)
MICROSITE_CONFIGURATION = ENV_TOKENS.get('MICROSITE_CONFIGURATION', {})
MICROSITE_ROOT_DIR = path(ENV_TOKENS.get('MICROSITE_ROOT_DIR', ''))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = ENV_TOKENS.get("PASSWORD_MIN_LENGTH")
PASSWORD_MAX_LENGTH = ENV_TOKENS.get("PASSWORD_MAX_LENGTH")
PASSWORD_COMPLEXITY = ENV_TOKENS.get("PASSWORD_COMPLEXITY", {})
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = ENV_TOKENS.get("PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD")
PASSWORD_DICTIONARY = ENV_TOKENS.get("PASSWORD_DICTIONARY", [])
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = AUTH_TOKENS.get("SESSION_INACTIVITY_TIMEOUT_IN_SECONDS")
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = ENV_TOKENS.get("TIME_ZONE_DISPLAYED_FOR_DEADLINES",
TIME_ZONE_DISPLAYED_FOR_DEADLINES)
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = ENV_TOKENS.get('X_FRAME_OPTIONS', X_FRAME_OPTIONS)
##### Third-party auth options ################################################
THIRD_PARTY_AUTH = AUTH_TOKENS.get('THIRD_PARTY_AUTH', THIRD_PARTY_AUTH)
##### OAUTH2 Provider ##############
if FEATURES.get('ENABLE_OAUTH2_PROVIDER'):
OAUTH_OIDC_ISSUER = ENV_TOKENS['OAUTH_OIDC_ISSUER']
OAUTH_ENFORCE_SECURE = ENV_TOKENS.get('OAUTH_ENFORCE_SECURE', True)
OAUTH_ENFORCE_CLIENT_SECURE = ENV_TOKENS.get('OAUTH_ENFORCE_CLIENT_SECURE', True)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = ENV_TOKENS.get('ADVANCED_SECURITY_CONFIG', {})
##### GOOGLE ANALYTICS IDS #####
GOOGLE_ANALYTICS_ACCOUNT = AUTH_TOKENS.get('GOOGLE_ANALYTICS_ACCOUNT')
GOOGLE_ANALYTICS_LINKEDIN = AUTH_TOKENS.get('GOOGLE_ANALYTICS_LINKEDIN')
##### OPTIMIZELY PROJECT ID #####
OPTIMIZELY_PROJECT_ID = AUTH_TOKENS.get('OPTIMIZELY_PROJECT_ID', OPTIMIZELY_PROJECT_ID)
#### Course Registration Code length ####
REGISTRATION_CODE_LENGTH = ENV_TOKENS.get('REGISTRATION_CODE_LENGTH', 8)
# REGISTRATION CODES DISPLAY INFORMATION
INVOICE_CORP_ADDRESS = ENV_TOKENS.get('INVOICE_CORP_ADDRESS', INVOICE_CORP_ADDRESS)
INVOICE_PAYMENT_INSTRUCTIONS = ENV_TOKENS.get('INVOICE_PAYMENT_INSTRUCTIONS', INVOICE_PAYMENT_INSTRUCTIONS)
# Which access.py permission names to check;
# We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = ENV_TOKENS.get(
'COURSE_CATALOG_VISIBILITY_PERMISSION',
COURSE_CATALOG_VISIBILITY_PERMISSION
)
COURSE_ABOUT_VISIBILITY_PERMISSION = ENV_TOKENS.get(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
COURSE_ABOUT_VISIBILITY_PERMISSION
)
# Enrollment API Cache Timeout
ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = ENV_TOKENS.get('ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT', 60)
# PDF RECEIPT/INVOICE OVERRIDES
PDF_RECEIPT_TAX_ID = ENV_TOKENS.get('PDF_RECEIPT_TAX_ID', PDF_RECEIPT_TAX_ID)
PDF_RECEIPT_FOOTER_TEXT = ENV_TOKENS.get('PDF_RECEIPT_FOOTER_TEXT', PDF_RECEIPT_FOOTER_TEXT)
PDF_RECEIPT_DISCLAIMER_TEXT = ENV_TOKENS.get('PDF_RECEIPT_DISCLAIMER_TEXT', PDF_RECEIPT_DISCLAIMER_TEXT)
PDF_RECEIPT_BILLING_ADDRESS = ENV_TOKENS.get('PDF_RECEIPT_BILLING_ADDRESS', PDF_RECEIPT_BILLING_ADDRESS)
PDF_RECEIPT_TERMS_AND_CONDITIONS = ENV_TOKENS.get('PDF_RECEIPT_TERMS_AND_CONDITIONS', PDF_RECEIPT_TERMS_AND_CONDITIONS)
PDF_RECEIPT_TAX_ID_LABEL = ENV_TOKENS.get('PDF_RECEIPT_TAX_ID_LABEL', PDF_RECEIPT_TAX_ID_LABEL)
PDF_RECEIPT_LOGO_PATH = ENV_TOKENS.get('PDF_RECEIPT_LOGO_PATH', PDF_RECEIPT_LOGO_PATH)
PDF_RECEIPT_COBRAND_LOGO_PATH = ENV_TOKENS.get('PDF_RECEIPT_COBRAND_LOGO_PATH', PDF_RECEIPT_COBRAND_LOGO_PATH)
PDF_RECEIPT_LOGO_HEIGHT_MM = ENV_TOKENS.get('PDF_RECEIPT_LOGO_HEIGHT_MM', PDF_RECEIPT_LOGO_HEIGHT_MM)
PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = ENV_TOKENS.get(
'PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM', PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM
)
if FEATURES.get('ENABLE_COURSEWARE_SEARCH') or FEATURES.get('ENABLE_DASHBOARD_SEARCH'):
# Use ElasticSearch as the search engine herein
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
# Facebook app
FACEBOOK_API_VERSION = AUTH_TOKENS.get("FACEBOOK_API_VERSION")
FACEBOOK_APP_SECRET = AUTH_TOKENS.get("FACEBOOK_APP_SECRET")
FACEBOOK_APP_ID = AUTH_TOKENS.get("FACEBOOK_APP_ID")
XBLOCK_SETTINGS = ENV_TOKENS.get('XBLOCK_SETTINGS', {})
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get("LICENSING", False)
##### CDN EXPERIMENT/MONITORING FLAGS #####
CDN_VIDEO_URLS = ENV_TOKENS.get('CDN_VIDEO_URLS', CDN_VIDEO_URLS)
ONLOAD_BEACON_SAMPLE_RATE = ENV_TOKENS.get('ONLOAD_BEACON_SAMPLE_RATE', ONLOAD_BEACON_SAMPLE_RATE)
##### ECOMMERCE API CONFIGURATION SETTINGS #####
ECOMMERCE_PUBLIC_URL_ROOT = ENV_TOKENS.get('ECOMMERCE_PUBLIC_URL_ROOT', ECOMMERCE_PUBLIC_URL_ROOT)
ECOMMERCE_API_URL = ENV_TOKENS.get('ECOMMERCE_API_URL', ECOMMERCE_API_URL)
ECOMMERCE_API_SIGNING_KEY = AUTH_TOKENS.get('ECOMMERCE_API_SIGNING_KEY', ECOMMERCE_API_SIGNING_KEY)
ECOMMERCE_API_TIMEOUT = ENV_TOKENS.get('ECOMMERCE_API_TIMEOUT', ECOMMERCE_API_TIMEOUT)
##### Custom Courses for EdX #####
if FEATURES.get('CUSTOM_COURSES_EDX'):
INSTALLED_APPS += ('ccx',)
FIELD_OVERRIDE_PROVIDERS += (
'ccx.overrides.CustomCoursesForEdxOverrideProvider',
)
##### Individual Due Date Extensions #####
if FEATURES.get('INDIVIDUAL_DUE_DATES'):
FIELD_OVERRIDE_PROVIDERS += (
'courseware.student_field_overrides.IndividualStudentOverrideProvider',
)
# PROFILE IMAGE CONFIG
PROFILE_IMAGE_BACKEND = ENV_TOKENS.get('PROFILE_IMAGE_BACKEND', PROFILE_IMAGE_BACKEND)
PROFILE_IMAGE_SECRET_KEY = AUTH_TOKENS.get('PROFILE_IMAGE_SECRET_KEY', PROFILE_IMAGE_SECRET_KEY)
PROFILE_IMAGE_MAX_BYTES = ENV_TOKENS.get('PROFILE_IMAGE_MAX_BYTES', PROFILE_IMAGE_MAX_BYTES)
PROFILE_IMAGE_MIN_BYTES = ENV_TOKENS.get('PROFILE_IMAGE_MIN_BYTES', PROFILE_IMAGE_MIN_BYTES)
if FEATURES['IS_EDX_DOMAIN']:
PROFILE_IMAGE_DEFAULT_FILENAME = 'images/edx-theme/default-profile'
else:
PROFILE_IMAGE_DEFAULT_FILENAME = ENV_TOKENS.get('PROFILE_IMAGE_DEFAULT_FILENAME', PROFILE_IMAGE_DEFAULT_FILENAME)
# EdxNotes config
EDXNOTES_PUBLIC_API = ENV_TOKENS.get('EDXNOTES_PUBLIC_API', EDXNOTES_PUBLIC_API)
EDXNOTES_INTERNAL_API = ENV_TOKENS.get('EDXNOTES_INTERNAL_API', EDXNOTES_INTERNAL_API)
##### Credit Provider Integration #####
CREDIT_PROVIDER_SECRET_KEYS = AUTH_TOKENS.get("CREDIT_PROVIDER_SECRET_KEYS", {})
| agpl-3.0 | 5,623,049,350,839,791,000 | 44.56025 | 148 | 0.722564 | false |
kulawczukmarcin/mypox | mininet_scripts/5sw-2host-proactive_net.py | 1 | 1609 | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, Node
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import Link, Intf
"""
This is from https://haryachyy.wordpress.com/2014/06/14/learning-pox-openflow-controller-proactive-approach/. To save time, added
h1.cmd( 'dhclient h1-eth0 ')
h2.cmd( 'dhclient h2-eth0' )
This statically sets the IPs. Not really sure whether this works fine (for the proactive approach).
!!!POX must be started for this to work!!!
"""
def aggNet():
CONTROLLER_IP='192.168.56.1'
net = Mininet( topo=None,
build=False)
net.addController( 'c0',
controller=RemoteController,
ip=CONTROLLER_IP,
port=6633)
h1 = net.addHost( 'h1', ip='0.0.0.0' )
h2 = net.addHost( 'h2', ip='0.0.0.0' )
leftSwitch = net.addSwitch( 's1' )
rightSwitch = net.addSwitch( 's2' )
midSwitch = net.addSwitch( 's3' )
uplSwitch = net.addSwitch( 's4' )
uprSwitch = net.addSwitch( 's5' )
# Add links
net.addLink( h1, leftSwitch )
net.addLink( leftSwitch, midSwitch )
net.addLink( midSwitch, rightSwitch )
net.addLink( rightSwitch, h2 )
net.addLink( rightSwitch, uprSwitch )
net.addLink( leftSwitch, uplSwitch )
net.addLink( uprSwitch, uplSwitch )
net.start()
#print h1.cmd( 'ping -c1', h2.IP() )
h1.cmd( 'dhclient h1-eth0 ')
h2.cmd( 'dhclient h2-eth0' )
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
aggNet()
| apache-2.0 | -7,188,892,072,148,140,000 | 25.816667 | 128 | 0.628341 | false |
polyaxon/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/models/v1_pipeline.py | 1 | 5017 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Pipeline(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'uuid': 'str',
'name': 'str',
'kind': 'V1PipelineKind'
}
attribute_map = {
'uuid': 'uuid',
'name': 'name',
'kind': 'kind'
}
def __init__(self, uuid=None, name=None, kind=None, local_vars_configuration=None): # noqa: E501
"""V1Pipeline - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._uuid = None
self._name = None
self._kind = None
self.discriminator = None
if uuid is not None:
self.uuid = uuid
if name is not None:
self.name = name
if kind is not None:
self.kind = kind
@property
def uuid(self):
"""Gets the uuid of this V1Pipeline. # noqa: E501
:return: The uuid of this V1Pipeline. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this V1Pipeline.
:param uuid: The uuid of this V1Pipeline. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def name(self):
"""Gets the name of this V1Pipeline. # noqa: E501
:return: The name of this V1Pipeline. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Pipeline.
:param name: The name of this V1Pipeline. # noqa: E501
:type: str
"""
self._name = name
@property
def kind(self):
"""Gets the kind of this V1Pipeline. # noqa: E501
:return: The kind of this V1Pipeline. # noqa: E501
:rtype: V1PipelineKind
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Pipeline.
:param kind: The kind of this V1Pipeline. # noqa: E501
:type: V1PipelineKind
"""
self._kind = kind
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Pipeline):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Pipeline):
return True
return self.to_dict() != other.to_dict()
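# Minimal usage sketch (illustrative; the import path is a guess based on this
# file's location in the generated package):
#     >>> from polyaxon_sdk.models.v1_pipeline import V1Pipeline
#     >>> p = V1Pipeline(uuid="abc123", name="train-pipeline")
#     >>> p.to_dict()
#     {'uuid': 'abc123', 'name': 'train-pipeline', 'kind': None}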
| apache-2.0 | 78,043,437,675,736,400 | 25.544974 | 101 | 0.565477 | false |
macosforge/ccs-calendarserver | twistedcaldav/test/test_xml.py | 1 | 5529 | ##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
from twisted.trial.unittest import SkipTest
from twistedcaldav.ical import Component
import twistedcaldav.test.util
from twistedcaldav.caldavxml import ComponentFilter, PropertyFilter, TextMatch, \
Filter, TimeRange
from txdav.caldav.datastore.query.filter import Filter as storeFilter
from txdav.caldav.datastore.query.filter import ComponentFilter as storeComponentFilter
class XML (twistedcaldav.test.util.TestCase):
"""
XML tests
"""
calendar_file = os.path.join(os.path.dirname(__file__), "data", "Holidays",
"C3184A66-1ED0-11D9-A5E0-000A958A3252.ics")
calendar = Component.fromStream(file(calendar_file))
calendar.validCalendarData()
calendar.validCalendarForCalDAV(methodAllowed=False)
def test_ComponentFilter(self):
"""
Component filter element.
"""
for component_name, has in (
("VEVENT", True),
("VTODO", False),
):
if has:
no = "no "
else:
no = ""
if has != storeComponentFilter(
ComponentFilter(
ComponentFilter(
name=component_name
),
name="VCALENDAR"
)
).match(self.calendar, None):
self.fail("Calendar has %s%s?" % (no, component_name))
def test_PropertyFilter(self):
"""
Property filter element.
"""
for property_name, has in (
("UID", True),
("BOOGER", False),
):
if has:
no = "no "
else:
no = ""
if has != storeComponentFilter(
ComponentFilter(
ComponentFilter(
PropertyFilter(
name=property_name
),
name="VEVENT"
),
name="VCALENDAR"
)
).match(self.calendar, None):
self.fail("Calendar has %sVEVENT with %s?" % (no, property_name))
def test_ParameterFilter(self):
"""
Parameter filter element.
"""
raise SkipTest("test unimplemented")
def test_TextMatch(self):
"""
Text match element.
"""
for uid, caseless, has in (
("C3184A66-1ED0-11D9-A5E0-000A958A3252", False, True),
("c3184a66-1ed0-11d9-a5e0-000a958a3252", True, True),
("BOOGER", False, False),
("BOOGER", True, False),
):
if has:
no = "no "
else:
no = ""
if has != storeComponentFilter(
ComponentFilter(
ComponentFilter(
PropertyFilter(
TextMatch.fromString(uid, caseless=caseless),
name="UID"
),
name="VEVENT"
),
name="VCALENDAR"
)
).match(self.calendar, None):
self.fail("Calendar has %sVEVENT with UID %s? (caseless=%s)" % (no, uid, caseless))
def test_TimeRange(self):
"""
Time range match element.
"""
for start, end, has in (
("20020101T000000Z", "20020101T000001Z", True),
("20020101T000000Z", "20020101T000000Z", True), # Timespan of zero duration
("20020101", "20020101", True), # Timespan of zero duration
("20020101", "20020102", True),
("20020101", "20020103", True),
("20020102", "20020103", False),
("20011201", "20020101", False), # End is non-inclusive
# Expanded recurrence
("20030101T000000Z", "20030101T000001Z", True),
("20030101T000000Z", "20030101T000000Z", True), # Timespan of zero duration
("20030101", "20030101", True), # Timespan of zero duration
("20030101", "20030102", True),
("20030101", "20030103", True),
("20030102", "20030103", False),
("20021201", "20030101", False), # End is non-inclusive
):
if has:
no = "no "
else:
no = ""
if has != storeFilter(
Filter(
ComponentFilter(
ComponentFilter(
TimeRange(start=start, end=end),
name="VEVENT"
),
name="VCALENDAR"
)
)
).match(self.calendar):
self.fail("Calendar has %sVEVENT with timerange %s?" % (no, (start, end)))
test_TimeRange.todo = "recurrence expansion"
| apache-2.0 | -8,531,242,181,165,564,000 | 32.713415 | 99 | 0.505336 | false |
ziplokk1/python-shopify-api | shopify/orders/tests/test_order.py | 1 | 2572 | from unittest import TestCase
class TestOrder(TestCase):
def test_email(self):
self.fail()
def test_closed_at(self):
self.fail()
def test_created_at(self):
self.fail()
def test_updated_at(self):
self.fail()
def test_number(self):
self.fail()
def test_note(self):
self.fail()
def test_token(self):
self.fail()
def test_gateway(self):
self.fail()
def test_test(self):
self.fail()
def test_total_price(self):
self.fail()
def test_subtotal_price(self):
self.fail()
def test_total_weight(self):
self.fail()
def test_total_tax(self):
self.fail()
def test_taxes_included(self):
self.fail()
def test_currency(self):
self.fail()
def test_financial_status(self):
self.fail()
def test_confirmed(self):
self.fail()
def test_total_discounts(self):
self.fail()
def test_total_line_items_price(self):
self.fail()
def test_cart_token(self):
self.fail()
def test_buyer_accepts_marketing(self):
self.fail()
def test_name(self):
self.fail()
def test_referring_site(self):
self.fail()
def test_landing_site(self):
self.fail()
def test_cancelled_at(self):
self.fail()
def test_cancel_reason(self):
self.fail()
def test_total_price_usd(self):
self.fail()
def test_checkout_token(self):
self.fail()
def test_reference(self):
self.fail()
def test_user_id(self):
self.fail()
def test_location_id(self):
self.fail()
def test_source_identifier(self):
self.fail()
def test_source_url(self):
self.fail()
def test_processed_at(self):
self.fail()
def test_device_id(self):
self.fail()
def test_browser_ip(self):
self.fail()
def test_landing_site_ref(self):
self.fail()
def test_order_number(self):
self.fail()
def test_discount_codes(self):
self.fail()
def test_note_attributes(self):
self.fail()
def test_payment_gateway_names(self):
self.fail()
def test_processing_method(self):
self.fail()
def test_checkout_id(self):
self.fail()
def test_source_name(self):
self.fail()
def test_fulfillment_status(self):
self.fail()
def test_tax_lines(self):
self.fail()
def test_tags(self):
self.fail()
| unlicense | -8,013,424,252,688,809,000 | 16.861111 | 43 | 0.559487 | false |
Autoplectic/dit | dit/divergences/pmf.py | 1 | 1169 | """
Provide a common place to access pmf-based divergences.
"""
from .earth_movers_distance import (
earth_movers_distance_pmf as earth_movers_distance,
)
from .jensen_shannon_divergence import (
jensen_shannon_divergence_pmf as jensen_shannon_divergence,
)
from ._kl_nonmerge import (
cross_entropy_pmf as cross_entropy,
relative_entropy_pmf as relative_entropy,
)
from .maximum_correlation import (
maximum_correlation_pmf as maximum_correlation,
conditional_maximum_correlation_pmf as conditional_maximum_correlation,
)
from .variational_distance import (
bhattacharyya_coefficient_pmf as bhattacharyya_coefficient,
chernoff_information_pmf as chernoff_information,
hellinger_distance_pmf as hellinger_distance,
variational_distance_pmf as variational_distance,
)
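# Quick sketch of the pmf-based API collected above (the arrays are made-up examples):
#     >>> import numpy as np
#     >>> p, q = np.array([0.5, 0.5]), np.array([0.9, 0.1])
#     >>> jensen_shannon_divergence([p, q])
# The call form mirrors jensen_shannon_divergence2() below.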
def jensen_shannon_divergence2(p, q):
"""
Compute the Jensen-Shannon divergence between two pmfs.
Parameters
----------
p : np.ndarray
The first pmf.
q : np.ndarray
The second pmf.
Returns
-------
jsd : float
The Jensen-Shannon divergence.
"""
return jensen_shannon_divergence([p, q]) | bsd-3-clause | -7,598,254,254,028,665,000 | 23.375 | 75 | 0.706587 | false |
puneetgkaur/sugar-toolkit-gtk3 | src/sugar3/graphics/menuitem.py | 1 | 3409 | # Copyright (C) 2007, Eduardo Silva <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
STABLE.
"""
import logging
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
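# Rough usage sketch (illustrative only; `palette_menu` is a hypothetical Gtk.Menu
# and the icon name is an assumption, not taken from this module):
#     item = MenuItem(text_label='Open', icon_name='document-open')
#     item.set_accelerator('<Ctrl>O')
#     palette_menu.append(item)
#     item.show()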
class MenuItem(Gtk.ImageMenuItem):
def __init__(self, text_label=None, icon_name=None,
text_maxlen=style.MENU_WIDTH_CHARS, xo_color=None,
file_name=None):
GObject.GObject.__init__(self)
self._accelerator = None
label = Gtk.AccelLabel(label=text_label)
label.set_alignment(0.0, 0.5)
label.set_accel_widget(self)
if text_maxlen > 0:
label.set_ellipsize(style.ELLIPSIZE_MODE_DEFAULT)
label.set_max_width_chars(text_maxlen)
self.add(label)
label.show()
if icon_name is not None:
icon = Icon(icon_name=icon_name,
icon_size=Gtk.IconSize.SMALL_TOOLBAR)
if xo_color is not None:
icon.props.xo_color = xo_color
self.set_image(icon)
icon.show()
elif file_name is not None:
icon = Icon(file=file_name, icon_size=Gtk.IconSize.SMALL_TOOLBAR)
if xo_color is not None:
icon.props.xo_color = xo_color
self.set_image(icon)
icon.show()
self.connect('can-activate-accel', self.__can_activate_accel_cb)
self.connect('hierarchy-changed', self.__hierarchy_changed_cb)
def __hierarchy_changed_cb(self, widget, previous_toplevel):
self._add_accelerator()
def __can_activate_accel_cb(self, widget, signal_id):
# Accept activation via accelerators regardless of this widget's state
return True
def _add_accelerator(self):
if self._accelerator is None or self.get_toplevel() is None:
return
# TODO: should we remove the accelerator from the prev top level?
if not hasattr(self.get_toplevel(), 'sugar_accel_group'):
logging.warning('No Gtk.AccelGroup in the top level window.')
return
accel_group = self.get_toplevel().sugar_accel_group
keyval, mask = Gtk.accelerator_parse(self._accelerator)
self.add_accelerator('activate', accel_group, keyval, mask,
Gtk.AccelFlags.LOCKED | Gtk.AccelFlags.VISIBLE)
def set_accelerator(self, accelerator):
self._accelerator = accelerator
self._add_accelerator()
def get_accelerator(self):
return self._accelerator
accelerator = GObject.property(type=str, setter=set_accelerator,
getter=get_accelerator)
| lgpl-2.1 | -5,601,322,465,322,806,000 | 34.510417 | 78 | 0.645057 | false |
a10networks/acos-client | acos_client/v30/slb/hm.py | 1 | 5639 | # Copyright 2014, Jeff Buttars, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from acos_client import errors as acos_errors
from acos_client.v30 import base
class HealthMonitor(base.BaseV30):
# Valid method objects
UDP = 'udp'
ICMP = 'icmp'
TCP = 'tcp'
HTTP = 'http'
HTTPS = 'https'
url_prefix = "/health/monitor/"
_method_objects = {
ICMP: {
"icmp": 1
},
UDP: {
"udp": 1,
"udp-port": 5550,
"force-up-with-single-healthcheck": 0
},
HTTP: {
"http": 1,
"http-port": 80,
"http-expect": 1,
"http-response-code": "200",
"http-url": 1,
"url-type": "GET",
"url-path": "/",
},
HTTPS: {
"https": 1,
"web-port": 443,
"https-expect": 1,
"https-response-code": "200",
"https-url": 1,
"url-type": "GET",
"url-path": "/",
"disable-sslv2hello": 0
},
TCP: {
"method-tcp": 1,
"tcp-port": 80
},
}
def get(self, name, **kwargs):
return self._get(self.url_prefix + name, **kwargs)
def _set(self, name, mon_method, hm_interval, hm_timeout, hm_max_retries,
method=None, url=None, expect_code=None, port=None, ipv4=None, post_data=None,
**kwargs):
params = {
"monitor": {
"name": name,
"retry": int(hm_max_retries),
"interval": int(hm_interval),
"timeout": int(hm_timeout),
"method": {
mon_method: self._method_objects[mon_method]
},
"override-ipv4": ipv4
}
}
if method:
params['monitor']['method'][mon_method]['url-type'] = method
if url:
params['monitor']['method'][mon_method]['url-path'] = url
if expect_code:
k = "%s-response-code" % mon_method
params['monitor']['method'][mon_method][k] = str(expect_code)
if port:
if mon_method == self.HTTPS:
k = 'web-port'
else:
k = '%s-port' % mon_method
params['monitor']['method'][mon_method][k] = int(port)
params['monitor']['override-port'] = int(port)
# handle POST case for HTTP/HTTPS hm
if ('url-type' in params['monitor']['method'][mon_method] and
'url-path' in params['monitor']['method'][mon_method] and
params['monitor']['method'][mon_method]['url-type'] == "POST"):
if post_data:
params['monitor']['method'][mon_method]['post-type'] = "postdata"
if mon_method == self.HTTPS:
params['monitor']['method'][mon_method]['https-postdata'] = str(post_data)
else:
params['monitor']['method'][mon_method]['http-postdata'] = str(post_data)
postpath = params['monitor']['method'][mon_method]['url-path']
params['monitor']['method'][mon_method]['post-path'] = postpath
params['monitor']['method'][mon_method].pop('url-path', None)
else:
params['monitor']['method'][mon_method].pop('post-type', None)
params['monitor']['method'][mon_method].pop('http-postdata', None)
params['monitor']['method'][mon_method].pop('post-path', None)
return params
def create(self, name, mon_type, hm_interval, hm_timeout, hm_max_retries,
method=None, url=None, expect_code=None, port=None, ipv4=None, post_data=None,
max_retries=None, timeout=None, **kwargs):
try:
self.get(name)
except acos_errors.NotFound:
pass
else:
raise acos_errors.Exists()
params = self._set(name, mon_type, hm_interval, hm_timeout,
hm_max_retries, method, url, expect_code, port, ipv4,
post_data=post_data, **kwargs)
return self._post(self.url_prefix, params, max_retries=max_retries, timeout=timeout,
axapi_args=kwargs)
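    # Illustrative call only -- it assumes an acos_client.Client instance `c`
    # that exposes this class as c.slb.hm (that wiring is not shown in this file):
    #     c.slb.hm.create('hm-web', c.slb.hm.HTTP, 5, 5, 3,
    #                     method='GET', url='/healthz', expect_code='200', port=8080)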
def update(self, name, mon_type, hm_interval, hm_timeout, hm_max_retries,
method=None, url=None, expect_code=None, port=None, ipv4=None, post_data=None,
max_retries=None, timeout=None, **kwargs):
self.get(name) # We want a NotFound if it does not exist
params = self._set(name, mon_type, hm_interval, hm_timeout,
hm_max_retries, method, url, expect_code, port, ipv4,
post_data=post_data, **kwargs)
return self._post(self.url_prefix + name, params, max_retries=max_retries, timeout=timeout,
axapi_args=kwargs)
def delete(self, name):
return self._delete(self.url_prefix + name)
| apache-2.0 | -3,965,902,010,584,862,700 | 38.159722 | 99 | 0.528817 | false |
ergs/transmutagen | transmutagen/py_solve/tests/test_solve.py | 1 | 2185 | import sys
import os
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla
from .. import N, solve, ones, flatten_sparse_matrix, diag_add, dot
import pytest
this_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(this_dir, os.pardir))
DTYPES = ['f8', np.complex128]
@pytest.mark.parametrize('dtype', DTYPES)
def test_solve_identity_ones(dtype):
b = np.ones(N, dtype=dtype)
mat = sp.eye(N, format='csr', dtype=dtype)
obs = solve(mat, b)
exp = spla.spsolve(mat, b)
assert np.allclose(exp, obs)
@pytest.mark.parametrize('dtype', DTYPES)
def test_solve_identity_range(dtype):
b = np.arange(N, dtype=dtype)
mat = sp.eye(N, format='csr', dtype=dtype)
obs = solve(mat, b)
exp = spla.spsolve(mat, b)
assert np.allclose(exp, obs)
@pytest.mark.parametrize('dtype', DTYPES)
def test_solve_ones_ones(dtype):
b = np.ones(N, dtype=dtype)
mat = ones(dtype=dtype) + 9*sp.eye(N, format='csr', dtype=dtype)
obs = solve(mat, b)
exp = spla.spsolve(mat, b)
assert np.allclose(exp, obs)
@pytest.mark.parametrize('dtype', DTYPES)
def test_solve_ones_range(dtype):
b = np.arange(N, dtype=dtype)
mat = ones(dtype=dtype) + 9*sp.eye(N, format='csr', dtype=dtype)
obs = solve(mat, b)
exp = spla.spsolve(mat, b)
assert np.allclose(exp, obs)
@pytest.mark.parametrize('dtype', DTYPES)
def test_solve_range_range(dtype):
b = np.arange(N, dtype=dtype)
mat = ones(dtype=dtype) + sp.diags([b], offsets=[0], shape=(N, N),
format='csr', dtype=dtype)
obs = solve(mat, b)
exp = spla.spsolve(mat, b)
assert np.allclose(exp, obs)
@pytest.mark.parametrize('dtype', DTYPES)
def test_diag_add(dtype):
mat = ones(dtype=dtype)
res = mat + 9*sp.eye(N, format='csr', dtype=dtype)
exp = flatten_sparse_matrix(res)
obs = diag_add(mat, 9.0)
assert np.allclose(exp, obs)
@pytest.mark.parametrize('dtype', DTYPES)
def test_dot(dtype):
x = np.arange(N, dtype=dtype)
mat = ones(dtype=dtype) + 9*sp.eye(N, format='csr', dtype=dtype)
exp = mat.dot(x)
obs = dot(mat, x)
assert np.allclose(exp, obs)
| bsd-3-clause | 1,125,637,393,144,652,900 | 26.658228 | 72 | 0.637986 | false |
drifterza/openstack-ansible-os_prometheus_server | doc/source/conf.py | 1 | 10221 | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import pbr.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'oslosphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2016, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
role_name = 'os_prometheus_server'
target_name = 'openstack-ansible-' + role_name
title = 'OpenStack-Ansible Documentation: ' + role_name + ' role'
# The link to the browsable source code (for the left hand menu)
oslosphinx_cgit_link = 'http://git.openstack.org/cgit/openstack/' + target_name
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version_info = pbr.version.VersionInfo(target_name)
# The full version, including alpha/beta/rc tags.
release = version_info.version_string_with_vcs()
# The short X.Y version.
version = version_info.canonical_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, target_name + '.tex',
title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, target_name,
title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, target_name,
title, author, project,
description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| apache-2.0 | -411,856,084,028,370,200 | 32.511475 | 79 | 0.707074 | false |
MMaus/mutils | fmalibs/sde.py | 1 | 13361 | from numpy import (
# array handling
asarray, zeros, zeros_like, empty, newaxis, floating,
# array operations
dot, linspace, mean, std, arange, any,
# core math
pi, nan, sin, cos, log, sqrt, exp, sinh, isnan,
)
from pylab import (
# plotting
plot, figure, clf, subplot, semilogy, axis,
title, xlabel, ylabel, loglog, show,
# math util functions
nansum, randn, diff
)
from pdb import set_trace as BRK
class SDE( object ):
"""Concrete class SDE
Implements a fixed step SDE integrator - the fixed step version of the
R3 scheme from doi:10.1016/j.cam.2006.08.012 (Bastani et.al. 2006),
which is itself taken from:
P.M. Burrage "Runge-Kutta methods for stochastic differential equations"
Ph.D. Thesis, The University of Queensland, Australia, 1999
This integrates an SDE of the Stratonovich form:
dX(t) = f (X(t)) dt + g(X(t)) o dW (t)
X(0) = X0
USAGE:
>>> sde = SDE( diffusion_function, drift_function, noise_dim )
>>> t,y,w = sde( x0, t0, t1, dt )
to integrate from t0 to t1 in steps of dt, with initial condition x0
>>> sde.integrateAt( t, x0, X, W )
to integrate and give trajectory values at times t, starting with
initial condition x0, and using X and W to store the trajectory data.
See the test_XXX functions in this module for usage examples.
(c) Shai Revzen, 2011, U. Penn
"""
# Butcher array for explicit R3 scheme
TAB = asarray([
[0, 0, 0, 0],
[0.5, 0.5, 0, 0],
[0.75, 0, 0.75, 0],
[nan, 2/9.0,1/3.0, 4/9.0]
])
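  # How the Butcher array above is read: column 0 holds the stage nodes c_i
  # (the trailing nan is a placeholder), columns 1..3 of the first three rows
  # hold the stage coefficients a_ij used as bd[k,1:k+1], and the last row
  # holds the weights b_j = (2/9, 1/3, 4/9) applied via bd[-1,1:] in
  # integrateAt().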
def __init__(self, d, s, sdim, dTab=TAB, sTab=TAB ):
"""
INPUTS:
d -- callable -- diffusion (ODE) part of the equation, mapping x --> dot x
s -- callable -- drift (SDE) part of the equation, mapping x,dW --> dot x
NOTE: this mapping *MUST* be linear in dW for the integration to be
valid. It is in functional form to allow for code optimization.
sdim -- dimension of dW
dTab, sTab -- Butcher tables of the integration scheme.
*WARNING* don't change these unless you really, REALLY, know what
it means. If you do -- make sure to run the tests and validate the
convergent order.
"""
# store Butcher arrays
self.bd = dTab
self.bs = sTab
assert callable(d),"diffusion flow is a callable"
self.d = d
assert callable(s),"stochastic (drift) flow is a callable"
self.s = s
assert int(sdim) >= 0, "dimension of noise >=0"
self.sdim = int(sdim)
def dW( self, t0, t1 ):
"""
Generate stochastic step for time interval t0..t1.
Subclasses may override this term if something other than a
Wiener process is needed.
"""
return randn(self.sdim) * sqrt(t1-t0)
def __call__(self,x0,t0,t1,dt=None):
""" Integrate the SDE
INPUTS:
x0 -- D -- initial condition
t0,t1 -- float -- start and end times
dt -- float (optional) -- time step; default is (t1-t0)/100
OUTPUTS:
t -- N -- time points
y -- N x D -- trajectory at times t
w -- N x self.sdim -- random walk values at times t
"""
if dt is None:
dt = float(t1-t0)/100
return self.integrateAt( arange(t0,t1,dt), x0 )
def bisect( self, t0,t1,x0,x1,w0,w1, dtype=floating ):
"""Bisect the trajectory segment between times t0 and t1
INPUTS:
t0, t1 -- float -- times, t1>t0
x0, x1 -- D -- trajectory points at t0, t1
w0, w1 -- sdim -- random walk values at t0, t1
OUTPUT:
t -- 3 -- t0,(t0+t1)/2,t1
x -- 3 x D -- trajectory with mid-point
w -- 3 x sdim -- random walk with mid-point
"""
assert t1>t0
x0 = asarray(x0,dtype=dtype).squeeze()
x1 = asarray(x1,dtype=dtype).squeeze()
assert x0.shape==x1.shape
w0 = asarray(w0,dtype=dtype).flatten()
w1 = asarray(w1,dtype=dtype).flatten()
assert w0.size==self.sdim
assert w1.size==self.sdim
    # Sample a Brownian bridge for the interval: conditioned on W(t0)=w0 and
    # W(t1)=w1, the mid-point increment has mean (w1-w0)/2 and variance
    # (t1-t0)/4; the second half-step is whatever remains so that W(t1)=w1.
    dW = w1-w0
    dT = t1-t0
    dW1 = dW/2.0 + randn(*w0.shape) * (sqrt(dT)/2.0)
    steps = [dW1, dW-dW1]
    def bridge(_t0,_t1):
      # integrateAt() requests one increment per consecutive pair of times
      return steps.pop(0)
    t,X,W = self.integrateAt([t0,(t0+t1)/2.0,t1],x0,dW_fun=bridge)
    # report the random walk in absolute terms, consistent with w0 and w1
    return t, X, W + w0
def refine( self, secfun, t0,t1,x0,x1,w0,w1, args=(), retAll = False, fTol = 1e-4, xTol=1e-8, tTol=1e-6, wTol = 1e-6, dtype=floating ):
"""Refine trajectory to a positive zero crossing of secfun
INPUTS:
t0,t1,x0,x1,w0,w1 -- as in self.bisect()
secfun -- callable -- secfun(t,x,w,*args) is a function that crosses from
negative to positive on the trajectory in the time interval t0..t1.
args -- tuple -- extra arguments for secfun
fTol -- tolerance for secfun values
xTol -- tolerance for trajectory coordinates (sup norm)
tTol -- tolerance for time values
      wTol -- tolerance for random walk step (sup norm)
retAll -- return all points sampled
OUTPUT:
(retAll false) t,x,w,y
t -- float -- crossing time
x -- D -- crossing point
w -- sdim -- random walk term
y -- float -- secfun value at crossing
(retAll true) unsorted list of points with format t,x,w,y
Uses repeated bisection to find a crossing point for a section function.
"""
y0 = secfun( t0, x0, w0, *args )
y1 = secfun( t1, x1, w1, *args )
# Check if we just got lucky...
if abs(y0)<fTol:
if abs(y1)<fTol:
return ((t1+t0)/2, (x1+x0)/2, (w1+w0)/2, (y1+y0)/2)
return (t0,x0,w0,y0)
elif abs(y1)<fTol:
return (t1,x1,w1,y1)
    if (y0 > 0 or y1 <= 0) and abs(y1-y0) > fTol:
raise ValueError("Section function values did not cross 0; were %g and %g" % (y0,y1))
traj = [(t0,x0,w0,y0),(t1,x1,w1,y1)]
while (abs(y1-y0)>fTol
or all(abs(x1-x0)>xTol)
or all(abs(w1-w0)>wTol)) and (abs(t1-t0)>tTol):
t,x,w = self.bisect( t0,t1,x0,x1,w0,w1 )
y = secfun( t[1], x[1,:], w[1,:], *args )
traj.append( (t[1], x[1,:], w[1,:], y) )
#!!!print t0,y0,'--',t1,y1
if y>0:
t1,x1,w1,y1 = traj[-1]
else:
t0,x0,w0,y0 = traj[-1]
if abs(t1-t0)<tTol:
traj.append( ((t1+t0)/2, (x1+x0)/2, (w1+w0)/2, (y1+y0)/2) )
if retAll:
return traj
return traj[-1]
def integrateAt( self, t, x0, X=None, W=None, dW_fun=None, dtype=floating ):
""" Integrate the SDE
INPUTS:
t -- N -- monotone increasing array with times at which to compute
the integral
x0 -- D -- initial condition
X -- N x D (or None) -- storage for trajectory
W -- N x self.sdim (or None) -- storage for random walk term
dW_fun -- callable -- dW(t0,t1) gives the random walk step for times
t0 to t1. For a Wiener process, this is randn()*sqrt(t1-t0).
OUTPUTS: t,X,W
if either X or W were None, a newly allocated array is returned with
the data.
"""
x0 = asarray(x0).flatten()
dim = len(x0)
stp = self.bd.shape[0]-1
assert dim == len(self.s(x0,zeros(self.sdim))), "Stochastic flow output must have same dimension as system"
if X is None:
X = empty( (len(t),dim), dtype=dtype )
else:
assert X.shape == (len(t),dim), "Has space for result"
if W is None:
W = empty((len(t),self.sdim), dtype=dtype)
else:
assert W.shape == (len(t),self.sdim), "Has space for noise"
if dW_fun is None:
dW_fun = self.dW
# Storage for step computations
Xd = zeros( (stp+1,dim), dtype=dtype )
Xs = zeros( (stp+1,dim), dtype=dtype )
Yd = zeros( (stp,dim), dtype=dtype )
Ys = zeros( (stp,dim), dtype=dtype )
# Loop over all times
t0 = t[0]
X[0,:] = x0
W[0,:] = 0
for ti in xrange(1,len(t)):
# Step forward in time
t1 = t[ti]
dt = t1-t0
dW = dW_fun( t0, t1 )
if any(isnan(dW)):
print "%s(%g,%g) has NaN-s" % (repr(dW_fun),t0,t1)
BRK()
# Init arrays for this step
Xd[0,:] = 0
Xs[0,:] = 0
Yd[0,:] = self.d( x0 )
Ys[0,:] = self.s( x0, dW )
for k in xrange(1,stp):
# Sums for Butcher row k
Xd[k,:] = dot( self.bd[k,1:k+1], Yd[:k,:] )
Xs[k,:] = dot( self.bs[k,1:k+1], Ys[:k,:] )
# Evaluate at integration point
xk = x0 + dt * Xd[k,:] + Xs[k,:]
Yd[k,:] = self.d( xk )
Ys[k,:] = self.s( xk, dW )
# Compute next time step
x1 = ( x0
+ dt * dot(self.bd[-1,1:],Yd)
+ dot(self.bs[-1,1:],Ys)
)
# Store results
X[ti,:] = x1
W[ti,:] = W[ti-1] + dW
if any(isnan(x1)):
print "integration step returned NaN-s"
BRK()
x0 = x1
t0 = t1
# Return results
return t,X,W
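# Illustrative usage sketch (guarded so it never runs on import); the flow and
# drift callables below are assumptions, not taken from the tests further down.
# It integrates dX = -X dt + 0.1 dW over t in [0,1] with the default step (t1-t0)/100.
if 0:
  example_sde = SDE(
    lambda x: -x,            # deterministic (drift) flow, one argument
    lambda x, dw: 0.1 * dw,  # stochastic (diffusion) flow, receives the dW sample
    1                        # sdim -- dimension of the driving noise
  )
  t_ex, y_ex, w_ex = example_sde( [1.0], 0.0, 1.0 )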
if 1: # include unit tests
import sys
def test_diffusion_order():
"""
Test the convergence order of the continuous integrator using
the "circle" system
"""
print "test_diffusion_order"
sde = SDE(
lambda x: dot([[0,-1],[1,0]],x),
lambda x,dw: zeros_like(x),
2
)
ee = []
for dt in 10**-linspace(0.5,4.5,11):
print "dt=",dt
t,y,w = sde( [0,1],0,0.2*pi, dt )
err = sqrt((y[-1,0]+sin(t[-1]))**2+(y[-1,1]-cos(t[-1]))**2)
ee.append([dt,err])
ee = asarray(ee)
loglog(ee[:,0],ee[:,1],'o-')
lee = log(ee)
lee = lee - mean(lee,axis=0)[newaxis,:]
lee = diff(lee,axis=0)
title("Order %.2f" % mean(lee[:,1] / lee[:,0]))
def test_vs_sol( s, d, sol, beta, x0, N=5, rng = (0,10,0.001)):
"""
Utility function for testing eqn. dX = s(x) dt + d(x,dW)
INPUTS:
s,d -- SDE specification
beta -- parameter values to test; SDE-s are integrated in parallel, with
        one column for each parameter value (thus sdim = len(beta))
sol -- function t,w --> x giving the exact solution for the integral
N -- number of repetitions to run
rng -- time range and sample points
OUTPUTS:
Y -- N x len(arange(*rng)) x len(beta) -- integrated outputs
X -- Y.shape -- exact solutions given by sol
Also plots the result in the current figure as subplots. Each row gives
a value of beta. The left column has the simulations; the right has the
error w.r.t. the exact solution
"""
def nanmean(x,axis=-1):
return nansum(x,axis=axis)/nansum(~isnan(x),axis=axis)
def nanstd(x,axis=-1):
return sqrt(nanmean(x**2,axis)-nanmean(x,axis)**2)
beta = asarray(beta)
sde = SDE(s,d,len(beta) )
t = arange(*rng)
w = None
Y = []
X = []
print "%d iterations:" % N,
sys.stdout.flush()
for k in xrange(N):
t,y,w = sde.integrateAt( t, x0, W = w )
Y.append(y)
X.append( sol(t,w) )
print k,
sys.stdout.flush()
print
Y = asarray(Y)
X = asarray(X)
err = Y-X
m = nanmean(err,0)
s = nanstd(err,0)
idx = linspace(0,len(t)-1,500).astype(int)
tg = t[idx]
for k in xrange(len(beta)):
#
#
subplot(len(beta),2,2*k+1)
cvg = abs(Y[:,idx,k]-Y[:,[-1],k])+1e-18
#BRK()
semilogy(tg,cvg.T,alpha=0.3)
axis([0,10,1e-18,10])
ylabel('beta = %.2e' % beta[k])
if k==0:
title('Simulations')
#
#
subplot(len(beta),2,2*k+2)
semilogy(tg,abs(err[:,idx,k].T),alpha=0.3)
semilogy(tg,abs(m[idx,k]),'k',lw=3)
semilogy(tg,abs(m[idx,k]+s[idx,k]),'--k')
semilogy(tg,abs(m[idx,k]-s[idx,k]),'--k')
axis([0,10,1e-18,10])
if k==0:
title('Errors')
return Y,X
  def test_bastani_6_4( beta = (0.1,1)):
    "test from Bastani, et al. 2006 eqn. 6.4, 6.5"
print "dX = -(1-x**2)dt + beta*(1-x**2)dW"
beta = asarray(beta)
def flow(x):
return -(1-x**2)
def drift(x,dw):
return dw*beta*(1-x**2)
def exact(t,w):
q = exp(-2*t[:,newaxis] + 2*beta*w)
return (q-1)/(q+1)
return test_vs_sol( flow, drift, exact, beta, (0,0) )
  def test_bastani_6_6( beta = (0.1,1)):
    "test from Bastani, et al. 2006 eqn. 6.6, 6.7"
print "dX = -x dt + beta*x dW"
beta = asarray(beta)
alpha = -1
def flow(x):
return alpha*x
def drift(x,dw):
return dw*beta*x
def exact(t,w):
return exp(alpha*t[:,newaxis]+beta*w)
return test_vs_sol( flow, drift, exact, beta, (1,1) )
  def test_bastani_6_8():
    "test from Bastani, et al. 2006 eqn. 6.8, 6.9"
print "dX = sqrt(1+x**2) dt + sqrt(1+x**2) dW"
def flow(x):
return sqrt(1+x**2)
def drift(x,dw):
return dw*sqrt(1+x**2)
def exact(t,w):
return sinh(t+w)
return test_vs_sol( flow, drift, exact, (0,), (0,), rng = (0,2,0.0005))
  def test_bastani_6_10( beta = (0.1,1), N=5):
    "test from Bastani, et al. 2006 eqn. 6.10, 6.11"
print "dX = -beta*sqrt(1-x.clip(-1,1)**2) dW"
beta = asarray(beta)
def flow(x):
return zeros_like(x)
def drift(x,dw):
return -beta*sqrt(1-x.clip(-1,1)**2)*dw
def exact(t,w):
return cos(beta*w+pi/2)
return test_vs_sol( flow, drift, exact, beta, zeros_like(beta), N=N )
if 0:#__name__=="__main__":
figure(1)
test_diffusion_order()
figure(4)
test_bastani_6_4()
figure(6)
test_bastani_6_6()
figure(8)
test_bastani_6_8()
figure(10)
test_bastani_6_10()
show()
| gpl-2.0 | -5,816,760,792,579,759,000 | 31.990123 | 137 | 0.55909 | false |
TalAmuyal/YAGBC2A | tools/CompileAll.py | 1 | 1996 | """
@ Set-up instructions
In order to run this tool, the following steps are required:
1. Set the working directory to the project's root.
2. Build the project into a "bin" directory under the project's root.
3. JRE must be installed and available globally through OS shell.
The above may be altered by reconfiguring the proper variables or by
providing arguments.
@ Arguments
* -s=<path> --source=<path> The path of the assembly files directory
* -d=<path> --destination=<path> The path for the assembled files directory
* -p=<path> --project=<path> The path for the compiler directory
Note(s):
	1. A directory's path should end with a trailing forward-slash (`/`).
2. In paths, spaces are not supported
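	@ Example
	An illustrative invocation (paths are assumptions matching the defaults below):
	python tools/CompileAll.py --source=tests --destination=bin --project=bin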
"""
"""
Imports
"""
import os
import sys
"""
Functions
"""
def main(argv):
"""
Set configurations
"""
JAVA_INTERPRETER = "java"
COMPILED_PROJECT_DIR = "bin"
COMPILED_PROJECT = "open_source.amuyal_tal.yagbc2a.Main"
INPUT_DIR = "tests"
OUTPUT_DIR = "bin"
"""
Parse arguments-vector
"""
for arg in argv:
splitArg = arg.split("=")
if len(splitArg) != 2:
			sys.exit("Unrecognized command: " + arg)
key = splitArg[0]
value = splitArg[1]
if key == "-p" or key == "--project":
COMPILED_PROJECT_DIR = value
elif key == "-s" or key == "--source":
INPUT_DIR = value
elif key == "-d" or key == "--destination":
OUTPUT_DIR = value
else:
			sys.exit("Unrecognized command: " + arg)
"""
Normalize configurations
"""
if not INPUT_DIR.endswith("/"):
INPUT_DIR = INPUT_DIR + "/"
if not OUTPUT_DIR.endswith("/"):
OUTPUT_DIR = OUTPUT_DIR + "/"
"""
Compile
"""
for file in os.listdir(INPUT_DIR):
if file.endswith(".asm"):
commandArgs = [JAVA_INTERPRETER, "-cp", COMPILED_PROJECT_DIR, COMPILED_PROJECT, INPUT_DIR + file, "-o", OUTPUT_DIR + file[:-4] + ".gb"]
compileCommand = " ".join(commandArgs)
			os.popen(compileCommand).read() #Execute command and hide output
"""
Main
"""
if __name__ == "__main__":
main(sys.argv[1:])
| gpl-3.0 | -1,318,118,175,961,597,700 | 20.462366 | 138 | 0.655812 | false |
ulikoehler/Translatron | Translatron/Misc/UniprotMetadatabase.py | 1 | 3699 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A fetcher for the UniProt metadatabase.
Iterates over all database IDs ranging from 1-300
and stores basic information about any valid database id
in a python dictionary.
"""
import requests
from ansicolor import red, green
from bs4 import BeautifulSoup
import json
__author__ = "Uli Koehler"
__copyright__ = "Copyright 2015 Uli Koehler"
__license__ = "CC0"
__version__ = "1.0 Universal"
__status__ = "Beta"
def iterateUniprotDatabases(quiet=True):
"""
Fetch the uniprot metadatabase by guessing valid integral database IDs.
    Guarantees to yield all databases with integral ids up to 300 (the loop bound below)
"""
template = "http://www.uniprot.org/database/%d.rdf"
for i in range(300): #In the far future, there might be more DBs than 300.
r = requests.get(template % i)
if r.status_code == requests.codes.ok:
if not quiet:
print (green("[UniProt MetaDB] Fetching DB #%d" % i))
soup = BeautifulSoup(r.text)
#Very, very crude RDF/XML parser
rdf = soup.html.body.find("rdf:rdf")
db = {
"id": rdf.abbreviation.text,
"name": rdf.abbreviation.text,
"category": rdf.category.text,
"description": rdf.find("rdfs:label").text,
}
url = rdf.find("rdfs:seealso")["rdf:resource"]
if url: db["url"] = url
urltemplate = rdf.urltemplate.text
if urltemplate: db["urltemplate"] = urltemplate
yield(db)
else:
if not quiet:
print(red("[UniProt MetaDB] Database #%d does not exist" % i))
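# Illustrative usage sketch (not executed); only the keys built above are guaranteed
# to be present, the printed values are whatever UniProt returns:
#   for db in iterateUniprotDatabases(quiet=True):
#       print(db["id"], db["category"], db.get("urltemplate", ""))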
def downloadUniprotMetadatabase(filename, quiet=False):
"""
Fetch the UniProt metadatabase and store the resulting database map in a JSON file.
Returns the object that has been written to the file as JSON
"""
#Run parser
databaseList = list(iterateUniprotDatabases(quiet=False))
#Remap to dictionary with key == database ID
databases = {db["id"]: db for db in databaseList}
#Write to outfile
with open(filename, "w") as outfile:
json.dump(databases, outfile)
return databases
def initializeMetaDatabase(filename="metadb.json"):
"""
Ensure we valid file with meta-database information,
i.e. links, names and URL templates for any database being referenced.
This information is used to generate links to external databases, e.g. STRING.
This function fetches the Metadatabase from UniProt if required.
The metadatabase dictionary is returned.
Also reads and adds (or replaces) additional entries from metadb-additional.json
"""
#
with open("metadb-additional.json") as infile:
additional = json.load(infile)
try:
with open(filename) as infile:
db = json.load(infile)
db.update(additional)
return db
except:
# Try to download from UniProt
try:
db = downloadUniprotMetadatabase(filename)
db.update(additional)
return db
except Exception as ex:
print(ex)
            print(red("Can neither read nor fetch metadatabase. Database links will not work.", bold=True))
if __name__ == "__main__":
#Usage example: Fetch all databases and store them in a single JSON file
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("outfile", help="The JSON file to write the result to")
parser.add_argument("-q", "--quiet", action="store_true", help="Do not print status informations")
args = parser.parse_args()
downloadUniprotMetadatabase(args.outfile, quiet=args.quiet)
| apache-2.0 | -285,140,666,107,954,020 | 35.264706 | 105 | 0.63801 | false |
lteasdale/ltbio | scripts/seq_chopper.py | 1 | 1942 | #!/usr/bin/env python
# Seq_chopper!
from __future__ import print_function
from os import path
# The fasta parser.
import screed
import docopt
import sys
__author__ = "Luisa Teasdale"
CLI_ARGS = """
USAGE:
seq_chopper.py -f SEQFILE -p PARTITIONS
OPTIONS:
-f SEQFILE The multi-species alignment file in fasta format.
-p PARTITIONS Partition file which contains the sections of the alignment you
want chopped out and the name of the fasta file the subset will
be printed to.
This script takes a supermatrix alignment and a text file specifying
partitions in RAxML format and will output a seperate fasta file for each
partition. At the moment the script only works with one data block per
partition.
"""
# Function to process the partitions file
def sum_partitions(line):
name = line.strip().split(' ')[0]
start_pos = line.strip().split(' ')[2]
end_pos = line.strip().split(' ')[4]
return name, start_pos, end_pos
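# Illustrative example of the partition line shape the indices above assume
# (whitespace-separated name/start/end): a line such as "gene1 = 1 - 500"
# yields name="gene1", start_pos="1", end_pos="500".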
# Function to do the chopping
def chop_seqs(fastafile, name, start_pos, end_pos):
filename = name + '.fasta'
with screed.open(fastafile) as fh:
with open(filename, 'w') as newfh:
for seq in fh:
seq_name = seq.name
seq = seq.sequence
start = int(start_pos) - 1
end = int(end_pos)
subset = seq[start:end]
print(">{}\n{}".format(seq_name, subset), file=newfh)
# If I am being run as a script...
if __name__ == '__main__':
opts = docopt.docopt(CLI_ARGS)
partitionfile = opts['-p']
fastafile = opts['-f']
partitions = open(partitionfile)
print(fastafile, file=sys.stderr)
for line in partitions:
name, start_pos, end_pos = sum_partitions(line)
chop_seqs(fastafile, name, start_pos, end_pos)
print('chopped partition {}'.format(name), file=sys.stderr)
print('Finished!', file=sys.stderr)
| mpl-2.0 | 2,092,505,129,969,343,500 | 27.144928 | 79 | 0.635942 | false |
YusufCelik/YChip8 | emulator/instructions.py | 1 | 12062 | from random import randint
from emulator import state
from hardware import gpu, keyboard
def executeOpcode(opcode):
"""
Function mapped to a specific opcode or opcode family will
be executed.
:param opcode:
"""
if (opcode & 0xf0ff) == 0x00e0 or (opcode & 0xf0ff) == 0x00ee:
instruction = opcode & 0xf0ff
elif (opcode & 0xf000) == 0xf000:
if (opcode & 0xf00f) == 0xf007 or (opcode & 0xf00f) == 0xf00a:
instruction = opcode & 0xf00f
else:
instruction = opcode & 0xf0ff
elif (opcode & 0xf000) == 0x8000:
instruction = opcode & 0xf00f
elif (opcode & 0xf0ff) == 0xe0a1 or (opcode & 0xf0ff) == 0xe09e:
instruction = opcode & 0xf0ff
else:
instruction = opcode & 0xF000
funcs[instruction](opcode)
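# Illustrative dispatch sketch (assumes the emulator state/gpu modules are
# initialised elsewhere):
#   executeOpcode(0x00E0)  # masked with 0xf0ff -> routed to x00E0 (clear screen)
#   executeOpcode(0x8124)  # 0x8xxx family, masked with 0xf00f -> routed to x8XY4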
def Zero(opcode):
"""
Regular 0 opcodes were machine code, and
will therefore be ignored
"""
state.program_counter += 2
def x00E0(opcode):
"""Clear the screen"""
gpu.reset()
state.program_counter += 2
def x00EE(opcode):
"""
Return from a subroutine.
The interpreter sets the program counter to the address
at the top of the state.stack,
then subtracts 1 from the state.stack pointer.
"""
state.program_counter = state.stack[-1]
state.stack.pop()
state.program_counter += 2
def x1NNN(opcode):
"""Jump to address NNN"""
state.program_counter = (opcode & 0x0FFF)
def x2NNN(opcode):
"""Execute subroutine starting at address NNN"""
state.stack.append(state.program_counter)
state.program_counter = (opcode & 0x0FFF)
def x3XKK(opcode):
"""Skip the following instruction if the value of register VX equals NN"""
vx_index = (opcode & 0x0F00) >> 8
cmp_value = (opcode & 0x00ff)
if state.vx[vx_index] == cmp_value:
state.program_counter += 4
else:
state.program_counter += 2
def x4XKK(opcode):
"""
Skip the following instruction if the value of register VX
is not equal to NN
"""
vx_index = (opcode & 0x0F00) >> 8
cmp_value = (opcode & 0x00ff)
if state.vx[vx_index] != cmp_value:
state.program_counter += 4
else:
state.program_counter += 2
def x5XY0(opcode):
"""
Skip the following instruction if the value of register VX
is equal to the value of register VY
"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
if state.vx[vx_index] == state.vx[vy_index]:
state.program_counter += 4
else:
state.program_counter += 2
def x6XKK(opcode):
"""Store number NN in register VX"""
vx_index = (opcode & 0x0F00) >> 8
vx_value = (opcode & 0x00FF)
state.vx[vx_index] = vx_value
state.program_counter += 2
def x7XKK(opcode):
"""Add the value NN to register VX"""
vx_index = (opcode & 0x0F00) >> 8
vx_new_value = (opcode & 0x00FF)
vx_result = state.vx[vx_index] + vx_new_value
vx_result &= 255
state.vx[vx_index] = vx_result
state.program_counter += 2
def x8XY0(opcode):
"""Store the value of register VY in register VX"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
state.vx[vx_index] = state.vx[vy_index]
state.program_counter += 2
def x8XY1(opcode):
"""Set VX to VX OR VY"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
state.vx[vx_index] |= state.vx[vy_index]
state.program_counter += 2
def x8XY2(opcode):
"""Set VX to VX AND VY"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
state.vx[vx_index] &= state.vx[vy_index]
state.program_counter += 2
def x8XY3(opcode):
"""Set VX to VX XOR VY"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
state.vx[vx_index] ^= state.vx[vy_index]
state.program_counter += 2
def x8XY4(opcode):
"""
Add the value of register VY to register VX
Set VF to 01 if a carry occurs
Set VF to 00 if a carry does not occur
"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
state.vx[vx_index] += state.vx[vy_index]
if state.vx[vx_index] > 255:
state.vx[0xf] = 1
state.vx[vx_index] &= 255
else:
state.vx[0xf] = 0
state.program_counter += 2
def x8XY5(opcode):
"""
Subtract the value of register VY from register VX
Set VF to 00 if a borrow occurs
Set VF to 01 if a borrow does not occur
"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
state.vx[vx_index] -= state.vx[vy_index]
if state.vx[vx_index] < 0:
state.vx[0xf] = 0
state.vx[vx_index] &= 255
else:
state.vx[0xf] = 1
state.program_counter += 2
def x8XY6(opcode):
"""
Store the value of register VY shifted right one bit in register VX
Set register VF to the least significant bit prior to the shift
"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
binary_string = bin(state.vx[vy_index])
if binary_string[len(binary_string) - 1] == '1':
state.vx[0xf] = 1
elif binary_string[len(binary_string) - 1] == '0':
state.vx[0xf] = 0
state.vx[vy_index] = state.vx[vy_index] >> 1
state.vx[vx_index] = state.vx[vy_index]
state.program_counter += 2
def x8XY7(opcode):
"""
Set register VX to the value of VY minus VX
Set VF to 00 if a borrow occurs
Set VF to 01 if a borrow does not occur
"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
state.vx[vx_index] = state.vx[vy_index] - state.vx[vx_index]
    if state.vx[vx_index] < 0:
        state.vx[0xf] = 0
        state.vx[vx_index] &= 255
    else:
        state.vx[0xf] = 1
state.program_counter += 2
def x8XYE(opcode):
"""
Store the value of register VY shifted left one bit in register VX
    Set register VF to the most significant bit prior to the shift
"""
vx_index = (opcode & 0x0f00) >> 8
vy_index = (opcode & 0x00f0) >> 4
    # VF takes the most significant bit (bit 7) of VY prior to the shift
    state.vx[0xf] = (state.vx[vy_index] & 0x80) >> 7
    state.vx[vy_index] = (state.vx[vy_index] << 1) & 255
state.vx[vx_index] = state.vx[vy_index]
state.program_counter += 2
def x9XY0(opcode):
"""
Skip the following instruction if the value of register VX
is not equal to the value of register VY
"""
vx_index = (opcode & 0x0F00) >> 8
vy_index = (opcode & 0x00f0) >> 4
if state.vx[vx_index] != state.vx[vy_index]:
state.program_counter += 4
else:
state.program_counter += 2
def ANNN(opcode):
"""Store memory address NNN in register I"""
i_value = (opcode & 0x0FFF)
state.register_index = i_value
state.program_counter += 2
def BNNN(opcode):
"""Jump to address NNN + V0"""
state.program_counter = (opcode & 0x0FFF) + state.vx[0]
def CXKK(opcode):
"""Set VX to a random number with a mask of NN"""
vx_index = (opcode & 0x0F00) >> 8
kk_value = (opcode & 0x00FF)
rnd_value = randint(0, 255)
toset_value = rnd_value & kk_value
state.vx[vx_index] = toset_value
state.program_counter += 2
def DXYN(opcode):
"""
    DRW: Draw a sprite at position VX, VY with N bytes
of sprite data starting at the address stored in I
Set VF to 01 if any set pixels are changed to unset, and 00 otherwise
"""
drawing_height = (opcode & 0x000F)
start_x_cord = state.vx[(opcode & 0x0F00) >> 8]
start_y_cord = state.vx[(opcode & 0x00f0) >> 4]
endoffset = state.register_index + drawing_height
draw_data = state.memory[state.register_index:endoffset]
state.vx[0xf] = 0
gpu.sprite_to_buffer(start_x_cord, start_y_cord, draw_data, drawing_height)
gpu.create_graphics_batch()
state.program_counter += 2
def EX9E(opcode):
"""
Skip the following instruction if the key corresponding
to the hex value currently stored in register VX is pressed
"""
vx_index = (opcode & 0x0f00) >> 8
if state.key_pressed_index == state.vx[vx_index]:
state.program_counter += 4
keyboard.reset()
else:
state.program_counter += 2
def EXA1(opcode):
"""
Skip the following instruction if the key corresponding to the hex
value currently stored in register VX is not pressed
"""
vx_index = (opcode & 0x0f00) >> 8
if state.key_pressed_index != state.vx[vx_index]:
state.program_counter += 4
else:
keyboard.reset()
state.program_counter += 2
def FX07(opcode):
"""Store the current value of the delay timer in register VX"""
vx_index = (opcode & 0x0F00) >> 8
state.vx[vx_index] = state.delay_timer
state.program_counter += 2
def FX0A(opcode):
"""Wait for a keypress and store the result in register VX"""
vx_index = (opcode & 0x0f00) >> 8
if state.key_pressed_index is not None:
state.vx[vx_index] = state.key_pressed_index
keyboard.reset()
state.program_counter += 2
def FX15(opcode):
""" Set the delay timer to the value of register VX"""
vx_index = (opcode & 0x0F00) >> 8
state.delay_timer = state.vx[vx_index]
state.program_counter += 2
def FX18(opcode):
"""Set the sound timer to the value of register VX"""
vx_index = (opcode & 0x0f00) >> 8
state.sound_timer = state.vx[vx_index]
state.program_counter += 2
def FX1E(opcode):
"""Add the value stored in register VX to register I"""
vx_index = (opcode & 0x0F00) >> 8
state.register_index += state.vx[vx_index]
state.program_counter += 2
def FX29(opcode):
"""Set I to the memory address of the sprite data corresponding
to the hexadecimal digit stored in register VX"""
vx_index = (opcode & 0x0F00) >> 8
state.register_index = state.vx[vx_index] * 0x5
state.program_counter += 2
def FX33(opcode):
"""
Store the binary-coded decimal equivalent
of the value stored in register VX at addresses I, I+1, and I+2
"""
vx_index = (opcode & 0x0F00) >> 8
bcd_value = '{:03d}'.format(state.vx[vx_index])
state.memory[state.register_index] = int(bcd_value[0])
state.memory[state.register_index + 1] = int(bcd_value[1])
state.memory[state.register_index + 2] = int(bcd_value[2])
state.program_counter += 2
def FX55(opcode):
"""Store the values of registers V0 to VX inclusive
in memory starting at address I.
I is set to I + X + 1 after operation
"""
last_index = (opcode & 0x0F00) >> 8
if last_index > 0:
for index in range(0, last_index + 1):
state.memory[state.register_index + index] = state.vx[index]
else:
state.memory[state.register_index] = state.vx[last_index]
state.program_counter += 2
def FX65(opcode):
"""Fill registers V0 to VX inclusive with the values stored in
memory starting at address I.
I is set to I + X + 1 after operation
"""
last_index = (opcode & 0x0F00) >> 8
if last_index > 0:
for index in range(0, last_index + 1):
state.vx[index] = state.memory[state.register_index + index]
else:
state.vx[last_index] = state.memory[state.register_index]
state.program_counter += 2
funcs = {
0x0000: Zero,
0x00ee: x00EE,
0x00e0: x00E0,
0x1000: x1NNN,
0x2000: x2NNN,
0x3000: x3XKK,
0x4000: x4XKK,
0x5000: x5XY0,
0x6000: x6XKK,
0x7000: x7XKK,
0x8000: x8XY0,
0x8001: x8XY1,
0x8002: x8XY2,
0x8003: x8XY3,
0x8004: x8XY4,
0x8005: x8XY5,
0x8006: x8XY6,
0x8007: x8XY7,
0x800e: x8XYE,
0x9000: x9XY0,
0xa000: ANNN,
0xb000: BNNN,
0xc000: CXKK,
0xd000: DXYN,
0xe0a1: EXA1,
0xe09e: EX9E,
0xf007: FX07,
0xf00a: FX0A,
0xf015: FX15,
0xf018: FX18,
0xf01e: FX1E,
0xf029: FX29,
0xf033: FX33,
0xf055: FX55,
0xf065: FX65
}
| gpl-3.0 | -2,646,583,697,571,645,400 | 24.501057 | 79 | 0.610181 | false |
larmer01/activetickpy | convert.py | 1 | 10298 | #!/usr/bin/env python3
"""ActiveTick data conversions and code mappings."""
import calendar
import time
import unittest
import pytz
import activetickpy.config as config
def _datetime_to_ms(dtime: calendar.datetime.datetime) -> int:
"""Convert the specified datetime object to milliseconds since the
Epoch.
Args:
dtime: datetime object
Returns:
Time in milliseconds since the Epoch.
"""
milliseconds = calendar.timegm(dtime.utctimetuple()) * 1000
milliseconds = int(milliseconds + dtime.microsecond / 1000.0)
return milliseconds
def _ms_to_datetime(milliseconds: int,
timezone: str) -> calendar.datetime.datetime:
"""Convert the specified time in milliseconds to a datetime object in
the given timezone.
Args:
milliseconds: time in milliseconds since the Epoch
timezone : timezone string
Returns:
datetime object in the specified timezone.
"""
seconds = int(milliseconds / 1000)
microseconds = (milliseconds % 1000) * 1000
gmtime = time.gmtime(seconds)
dtime = pytz.datetime.datetime(gmtime.tm_year, gmtime.tm_mon,
gmtime.tm_mday, gmtime.tm_hour,
gmtime.tm_min, gmtime.tm_sec, microseconds,
pytz.timezone('UTC'))
return dtime.astimezone(pytz.timezone(timezone))
def _ms_to_str(milliseconds: int, timezone: str, formatting: str) -> str:
"""Convert the specified time in milliseconds to a string.
Please see the following for formatting directives:
http://docs.python.org/library/datetime.html#strftime-strptime-behavior
Args:
milliseconds: time in milliseconds since the Epoch
timezone : timezone string
formatting : formatting string
Returns:
Time string.
"""
dtime = _ms_to_datetime(milliseconds, timezone)
return dtime.strftime(formatting)
def at_time_to_ms(date: str, timezone: str=config.TIMEZONE,
include_ms: bool=True) -> int:
"""Convert the specified ActiveTick time string in either
config.AT_TIME_STR_MS or config.AT_TIME_STR form to milliseconds since the
Epoch.
Args:
date : date string
timezone : timezone string (default: config.TIMEZONE)
include_ms: True if milliseconds are included in the date string;
False, otherwise (default: True)
Returns:
Time in milliseconds since the Epoch.
"""
if include_ms:
dtime = pytz.datetime.datetime.strptime(date, config.AT_TIME_STR_MS)
else:
dtime = pytz.datetime.datetime.strptime(date, config.AT_TIME_STR)
# Note that in order for daylight savings conversions to work, we
# MUST use localize() here and cannot simply pass a timezone into the
# datetime constructor.
tzone = pytz.timezone(timezone)
dtime = pytz.datetime.datetime(dtime.year, dtime.month, dtime.day,
dtime.hour, dtime.minute, dtime.second,
dtime.microsecond)
dtime = tzone.localize(dtime)
dtime = dtime.astimezone(pytz.timezone('UTC'))
return _datetime_to_ms(dtime)
def get_exchange(code: str) -> str:
"""Gets the name of the exchange represented by the specified exchange
code.
Args:
code: exchange code string
Returns:
Exchange name.
Raises:
ValueError: if `code` is not a valid exchange code.
"""
if code in config.EXCHANGE_CODE_MAP:
return config.EXCHANGE_CODE_MAP[code]
else:
raise ValueError('{0} is not a valid exchange code.'.format(code))
def get_status(code: int) -> str:
"""Gets the status represented by the specified status code.
Args:
code: status code
Returns:
Status.
Raises:
ValueError: if `code` is not a valid status code.
"""
if code in config.STATUS_CODE_MAP:
return config.STATUS_CODE_MAP[code]
else:
raise ValueError('{0} is not a valid status code.'.format(code))
def get_trade_condition(code: int) -> str:
"""Gets the trade condition represented by the specified trade condition
code.
Args:
code: trade condition code
Returns:
Trade condition.
Raises:
ValueError: if `code` is not a valid trade condition code.
"""
if code in config.TRADE_CONDITION_CODE_MAP:
return config.TRADE_CONDITION_CODE_MAP[code]
else:
raise ValueError('{0} is not a valid trade condition code.'.format(
code))
def get_typed_data(code: int, data: str):
"""Converts the specified data string to the data type represented by
`code`.
Code ActiveTick Type Python Type
----------------------------------
1 Byte bytes
    2    ByteArray       bytearray
3 UInteger32 int
4 UInteger64 int
5 Integer32 int
6 Integer64 int
7 Price float
8 String str
9 UnicodeString str
10 DateTime int
11 Double float
Args:
code: target data type code
data: data to convert
Returns:
Data as the specified data type.
Raises:
ValueError: if `code` is not a valid data type code.
"""
if code == 1:
return bytes([int(data)]).decode()
elif code == 2:
return bytearray(data.encode())
elif code >= 3 and code <= 6:
return int(data)
elif code == 7 or code == 11:
return float(data)
elif code == 8 or code == 9:
return str(data)
elif code == 10:
return at_time_to_ms(data)
else:
raise ValueError('{0} is not a valid data type code.'.format(code))
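# Illustrative sketch (the values mirror the unit tests below):
#   get_typed_data(7, '163.790000')         -> 163.79 (float)
#   get_typed_data(10, '20180112195500598') -> 1515804900598 (ms since the Epoch,
#                                              interpreted in config.TIMEZONE)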
def ms_to_at_time(milliseconds: int, timezone: str=config.TIMEZONE) -> int:
"""Return the time as an integer of the form config.AT_TIME_STR for the
specified time in milliseconds. This is the date/time format used by
ActiveTick.com.
Args:
milliseconds: time in milliseconds
timezone : timezone (default: config.TIMEZONE)
Returns:
ActiveTick time in yyyymmddhhmmss form.
"""
datestr = _ms_to_str(milliseconds, timezone, config.AT_TIME_STR)
return int(datestr)
class ConvertTest(unittest.TestCase):
"""Test cases for this module."""
def setUp(self):
"""Initialization for the test cases."""
pass
def test_get_exchange(self):
"""Tests the get_exchange method."""
self.assertEqual('AMEX', get_exchange('A'))
with self.assertRaises(ValueError):
get_exchange('BadCode')
with self.assertRaises(ValueError):
get_exchange(None)
def test_get_status(self):
"""Tests the get_status method."""
self.assertEqual('', get_status(1))
self.assertEqual('Invalid', get_status(2))
with self.assertRaises(ValueError):
get_status(5)
with self.assertRaises(ValueError):
get_status('Bad Status')
with self.assertRaises(ValueError):
get_status(None)
def test_get_trade_condition(self):
"""Tests the get_trade_condition method."""
self.assertEqual('Regular', get_trade_condition(0))
with self.assertRaises(ValueError):
get_trade_condition(50)
with self.assertRaises(ValueError):
get_trade_condition('Bad Condition')
with self.assertRaises(ValueError):
get_trade_condition(None)
def test_get_typed_data(self):
"""Tests the get_typed_data method."""
# type 1
self.assertEqual('P', get_typed_data(1, '80'))
# type 2
self.assertEqual(bytearray(b'80'), get_typed_data(2, '80'))
# type 3
self.assertEqual(202, get_typed_data(3, '202'))
# type 4
self.assertEqual(5032474, get_typed_data(4, '5032474'))
# type 5
self.assertEqual(5032474, get_typed_data(5, '5032474'))
# type 6
self.assertEqual(5032474, get_typed_data(6, '5032474'))
# type 7
self.assertEqual(163.79, get_typed_data(7, '163.790000'))
# type 8
self.assertEqual('5032474', get_typed_data(8, '5032474'))
# type 9
self.assertEqual('5032474', get_typed_data(9, '5032474'))
# type 10
self.assertEqual(1515804900598,
get_typed_data(10, '20180112195500598'))
# type 11
self.assertEqual(163.79, get_typed_data(11, '163.790000'))
# Invalid codes
with self.assertRaises(ValueError):
get_typed_data(0, 'data')
with self.assertRaises(ValueError):
get_typed_data(12, 'data')
# Bad data
with self.assertRaises(ValueError):
get_typed_data(1, 'X')
def test_ms_to_at_time(self):
"""Tests the ms_to_at_time method."""
self.assertEqual(20171106154710,
ms_to_at_time(1510001230100, 'US/Eastern'))
self.assertEqual(20171106154710,
ms_to_at_time(1510001230100))
self.assertEqual(20171106144710,
ms_to_at_time(1510001230100, 'US/Central'))
def test_at_time_to_ms(self):
"""Tests the at_time_to_ms method."""
# With milliseconds
self.assertEqual(1510001230100,
at_time_to_ms('20171106154710100', 'US/Eastern'))
self.assertEqual(1510001230100,
at_time_to_ms('20171106154710100'))
self.assertEqual(1510001230100,
at_time_to_ms('20171106144710100', 'US/Central'))
# Without milliseconds
self.assertEqual(1510001230000,
at_time_to_ms('20171106154710', 'US/Eastern', False))
self.assertEqual(1510001230000,
at_time_to_ms('20171106154710', include_ms=False))
self.assertEqual(1510001230000,
at_time_to_ms('20171106144710', 'US/Central', False))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,847,621,181,480,076,000 | 31.18125 | 78 | 0.594581 | false |
twz915/django | django/forms/utils.py | 1 | 5880 | import json
import sys
from collections import UserList
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import ugettext_lazy as _
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. In the case of a boolean value, the key will appear
without a value. It is assumed that the keys do not need to be
XML-escaped. If the passed dictionary is empty, then return an empty
string.
The result is passed through 'mark_safe' (by way of 'format_html_join').
"""
key_value_attrs = []
boolean_attrs = []
for attr, value in attrs.items():
if isinstance(value, bool):
if value:
boolean_attrs.append((attr,))
elif value is not None:
key_value_attrs.append((attr, value))
return (
format_html_join('', ' {}="{}"', sorted(key_value_attrs)) +
format_html_join('', ' {}', sorted(boolean_attrs))
)
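# Illustrative sketch (attribute names are arbitrary examples): a True boolean
# renders the bare attribute, a None value is dropped, e.g.
#   flatatt({'id': 'header', 'class': 'news', 'disabled': True, 'title': None})
#   -> ' class="news" id="header" disabled'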
@html_safe
class ErrorDict(dict):
"""
A collection of errors that knows how to display itself in various formats.
The dictionary keys are the field names, and the values are the errors.
"""
def as_data(self):
return {f: e.as_data() for f, e in self.items()}
def as_json(self, escape_html=False):
return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})
def as_ul(self):
if not self:
return ''
return format_html(
'<ul class="errorlist">{}</ul>',
format_html_join('', '<li>{}{}</li>', ((k, force_text(v)) for k, v in self.items()))
)
def as_text(self):
output = []
for field, errors in self.items():
output.append('* %s' % field)
output.append('\n'.join(' * %s' % e for e in errors))
return '\n'.join(output)
def __str__(self):
return self.as_ul()
@html_safe
class ErrorList(UserList, list):
"""
A collection of errors that knows how to display itself in various formats.
"""
def __init__(self, initlist=None, error_class=None):
super(ErrorList, self).__init__(initlist)
if error_class is None:
self.error_class = 'errorlist'
else:
self.error_class = 'errorlist {}'.format(error_class)
def as_data(self):
return ValidationError(self.data).error_list
def get_json_data(self, escape_html=False):
errors = []
for error in self.as_data():
message = list(error)[0]
errors.append({
'message': escape(message) if escape_html else message,
'code': error.code or '',
})
return errors
def as_json(self, escape_html=False):
return json.dumps(self.get_json_data(escape_html))
def as_ul(self):
if not self.data:
return ''
return format_html(
'<ul class="{}">{}</ul>',
self.error_class,
format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
)
def as_text(self):
return '\n'.join('* %s' % e for e in self)
def __str__(self):
return self.as_ul()
def __repr__(self):
return repr(list(self))
def __contains__(self, item):
return item in list(self)
def __eq__(self, other):
return list(self) == other
def __getitem__(self, i):
error = self.data[i]
if isinstance(error, ValidationError):
return list(error)[0]
return force_text(error)
def __reduce_ex__(self, *args, **kwargs):
# The `list` reduce function returns an iterator as the fourth element
# that is normally used for repopulating. Since we only inherit from
# `list` for `isinstance` backward compatibility (Refs #17413) we
# nullify this iterator as it would otherwise result in duplicate
# entries. (Refs #23594)
info = super(UserList, self).__reduce_ex__(*args, **kwargs)
return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
"""
When time zone support is enabled, convert naive datetimes
entered in the current time zone to aware datetimes.
"""
if settings.USE_TZ and value is not None and timezone.is_naive(value):
current_timezone = timezone.get_current_timezone()
try:
return timezone.make_aware(value, current_timezone)
except Exception:
message = _(
'%(datetime)s couldn\'t be interpreted '
'in time zone %(current_timezone)s; it '
'may be ambiguous or it may not exist.'
)
params = {'datetime': value, 'current_timezone': current_timezone}
six.reraise(ValidationError, ValidationError(
message,
code='ambiguous_timezone',
params=params,
), sys.exc_info()[2])
return value
def to_current_timezone(value):
"""
When time zone support is enabled, convert aware datetimes
to naive datetimes in the current time zone for display.
"""
if settings.USE_TZ and value is not None and timezone.is_aware(value):
current_timezone = timezone.get_current_timezone()
return timezone.make_naive(value, current_timezone)
return value
| bsd-3-clause | -6,116,146,199,201,036,000 | 31.307692 | 96 | 0.596259 | false |
stuart-knock/tvb-library | tvb/basic/readers.py | 1 | 7790 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
This module contains basic reading mechanism for default DataType fields.
.. moduleauthor:: Lia Domide <[email protected]>
"""
try:
H5PY_SUPPORT = True
import h5py as hdf5
except Exception:
H5PY_SUPPORT = False
import os
import numpy
import zipfile
import uuid
from tempfile import gettempdir
from scipy import io as scipy_io
from tvb.basic.logger.builder import get_logger
class H5Reader():
"""
Read one or many numpy arrays from a H5 file.
"""
def __init__(self, h5_path):
self.logger = get_logger(__name__)
if H5PY_SUPPORT:
self.hfd5_source = hdf5.File(h5_path, 'r', libver='latest')
else:
self.logger.warning("You need h5py properly installed in order to load from a HDF5 source.")
def read_field(self, field):
try:
return self.hfd5_source['/' + field][()]
except Exception:
self.logger.exception("Could not read from %s field" % field)
raise ReaderException("Could not read from %s field" % field)
def read_optional_field(self, field):
try:
return self.read_field(field)
except ReaderException:
return numpy.array([])
class FileReader():
"""
Read one or multiple numpy arrays from a text/bz2 file.
"""
def __init__(self, file_path):
self.logger = get_logger(__name__)
self.file_path = file_path
self.file_stream = file_path
def read_array(self, dtype=numpy.float64, skip_rows=0, use_cols=None, matlab_data_name=None):
self.logger.debug("Starting to read from: " + str(self.file_path))
try:
# Try to read H5:
if self.file_path.endswith('.h5'):
                self.logger.error("Reading H5 files from within a ZIP archive is not yet implemented!")
return numpy.array([])
# Try to read NumPy:
if self.file_path.endswith('.txt') or self.file_path.endswith('.bz2'):
return self._read_text(self.file_stream, dtype, skip_rows, use_cols)
if self.file_path.endswith('.npz'):
return numpy.load(self.file_stream)
# Try to read Matlab format:
return self._read_matlab(self.file_stream, matlab_data_name)
except Exception:
self.logger.exception("Could not read from %s file" % self.file_path)
raise ReaderException("Could not read from %s file" % self.file_path)
def _read_text(self, file_stream, dtype, skip_rows, use_cols):
array_result = numpy.loadtxt(file_stream, dtype=dtype, skiprows=skip_rows, usecols=use_cols)
return array_result
def _read_matlab(self, file_stream, matlab_data_name=None):
if self.file_path.endswith(".mtx"):
return scipy_io.mmread(file_stream)
if self.file_path.endswith(".mat"):
matlab_data = scipy_io.matlab.loadmat(file_stream)
return matlab_data[matlab_data_name]
class ZipReader():
"""
Read one or many numpy arrays from a ZIP archive.
"""
def __init__(self, zip_path):
self.logger = get_logger(__name__)
self.zip_archive = zipfile.ZipFile(zip_path)
def read_array_from_file(self, file_name, dtype=numpy.float64, skip_rows=0, use_cols=None, matlab_data_name=None):
matching_file_name = None
for actual_name in self.zip_archive.namelist():
if file_name in actual_name and not actual_name.startswith("__MACOSX"):
matching_file_name = actual_name
break
if matching_file_name is None:
self.logger.warning("File %r not found in ZIP." % file_name)
raise ReaderException("File %r not found in ZIP." % file_name)
zip_entry = self.zip_archive.open(matching_file_name, 'rU')
if matching_file_name.endswith(".bz2"):
temp_file = copy_zip_entry_into_temp(zip_entry, matching_file_name)
file_reader = FileReader(temp_file)
result = file_reader.read_array(dtype, skip_rows, use_cols, matlab_data_name)
os.remove(temp_file)
return result
file_reader = FileReader(matching_file_name)
file_reader.file_stream = zip_entry
return file_reader.read_array(dtype, skip_rows, use_cols, matlab_data_name)
def read_optional_array_from_file(self, file_name, dtype=numpy.float64, skip_rows=0,
use_cols=None, matlab_data_name=None):
try:
return self.read_array_from_file(file_name, dtype, skip_rows, use_cols, matlab_data_name)
except ReaderException:
return numpy.array([])
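# Illustrative usage sketch (archive and entry names are assumptions):
#   reader = ZipReader("connectivity.zip")
#   weights = reader.read_array_from_file("weights.txt")
#   tracts = reader.read_optional_array_from_file("tract_lengths.txt")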
class ReaderException(Exception):
pass
def try_get_absolute_path(relative_module, file_suffix):
"""
:param relative_module: python module to be imported. When import of this fails, we will return the file_suffix
:param file_suffix: In case this is already an absolute path, return it immediately,
otherwise append it after the module path
:return: Try to build an absolute path based on a python module and a file-suffix
"""
result_full_path = file_suffix
if not os.path.isabs(file_suffix):
try:
module_import = __import__(relative_module, globals(), locals(), ["__init__"])
result_full_path = os.path.join(os.path.dirname(module_import.__file__), file_suffix)
except ImportError:
LOG = get_logger(__name__)
LOG.exception("Could not import tvb_data Python module for default data-set!")
return result_full_path
def copy_zip_entry_into_temp(source, file_suffix, buffer_size=1024 * 1024):
"""
Copy a ZIP Entry into a new file created under system temporary folder.
:param source: ZipEntry
:param file_suffix: String suffix to be added to the temporary file name
:param buffer_size: Buffer size used when copying the file-content
:return: the path towards the new file.
"""
result_dest_path = os.path.join(gettempdir(), "tvb_" + str(uuid.uuid1()) + file_suffix)
result_dest = open(result_dest_path, 'wb')
while 1:
copy_buffer = source.read(buffer_size)
if copy_buffer:
result_dest.write(copy_buffer)
else:
break
source.close()
result_dest.close()
return result_dest_path | gpl-2.0 | 8,471,436,281,063,750,000 | 32.012712 | 118 | 0.646727 | false |
DarioGT/OMS-PluginXML | org.modelsphere.sms/lib/jython-2.2.1/Lib/pprint.py | 1 | 10427 | # Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
    Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
from types import DictType, ListType, TupleType, StringType
import sys
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_sys_modules = sys.modules
_id = id
_len = len
_type = type
def pprint(object, stream=None):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(stream=stream)
printer.pprint(object)
def pformat(object):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter().pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
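# Illustrative sketch of the recursion protection implemented below:
#   x = [1, 2]; x.append(x)
#   isrecursive(x) -> 1
#   saferepr(x) -> "[1, 2, <Recursion on list with id=...>]" (actual id elided)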
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0
assert depth is None or depth > 0, "depth must be > 0"
assert width
self.__depth = depth
self.__indent_per_level = indent
self.__width = width
if stream:
self.__stream = stream
else:
self.__stream = sys.stdout
def pprint(self, object):
self.__stream.write(self.pformat(object) + "\n")
def pformat(self, object):
sio = StringIO()
self.__format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
self.__recursive = 0
self.__repr(object, {}, 0)
return self.__recursive
def isreadable(self, object):
self.__recursive = 0
self.__readable = 1
self.__repr(object, {}, 0)
return self.__readable and not self.__recursive
def __format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self.__recursive = 1
self.__readable = 0
return
rep = self.__repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self.__width - 1 - indent - allowance)
write = stream.write
if sepLines:
if typ is DictType:
write('{')
if self.__indent_per_level > 1:
write((self.__indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self.__indent_per_level
items = object.items()
items.sort()
key, ent = items[0]
rep = self.__repr(key, context, level)
write(rep)
write(': ')
self.__format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self.__repr(key, context, level)
write(',\n%s%s: ' % (' '*indent, rep))
self.__format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self.__indent_per_level
del context[objid]
write('}')
return
if typ is ListType or typ is TupleType:
if typ is ListType:
write('[')
endchar = ']'
else:
write('(')
endchar = ')'
if self.__indent_per_level > 1:
write((self.__indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self.__indent_per_level
self.__format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
write(',\n' + ' '*indent)
self.__format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self.__indent_per_level
del context[objid]
if typ is TupleType and length == 1:
write(',')
write(endchar)
return
write(rep)
def __repr(self, object, context, level):
repr, readable, recursive = _safe_repr(object, context,
self.__depth, level)
if not readable:
self.__readable = 0
if recursive:
self.__recursive = 1
return repr
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is StringType:
if 'locale' not in _sys_modules:
return `object`, 1, 0
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, `char`[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), 1, 0
if typ is DictType:
if not object:
return "{}", 1, 0
objid = _id(object)
if maxlevels and level > maxlevels:
return "{...}", 0, objid in context
if objid in context:
return _recursion(object), 0, 1
context[objid] = 1
readable = 1
recursive = 0
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in object.iteritems():
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = 1
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if typ is ListType or typ is TupleType:
if typ is ListType:
if not object:
return "[]", 1, 0
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", 1, 0
format = "(%s)"
objid = _id(object)
if maxlevels and level > maxlevels:
return format % "...", 0, objid in context
if objid in context:
return _recursion(object), 0, 1
context[objid] = 1
readable = 1
recursive = 0
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = 0
if orecur:
recursive = 1
del context[objid]
return format % _commajoin(components), readable, recursive
rep = `object`
return rep, (rep and not rep.startswith('<')), 0
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print "_safe_repr:", t2 - t1
print "pformat:", t3 - t2
if __name__ == "__main__":
_perfcheck()
| gpl-3.0 | 2,932,550,155,883,395,600 | 31.635484 | 79 | 0.503884 | false |
tareqalayan/pytest | _pytest/python_api.py | 1 | 23649 | import math
import sys
import py
from six.moves import zip
from _pytest.compat import isclass
from _pytest.outcomes import fail
import _pytest._code
def _cmp_raises_type_error(self, other):
"""__cmp__ implementation which raises TypeError. Used
by Approx base classes to implement only == and != and raise a
TypeError for other comparisons.
Needed in Python 2 only, Python 3 all it takes is not implementing the
other operators at all.
"""
__tracebackhide__ = True
raise TypeError('Comparison operators other than == and != not supported by approx objects')
# builtin pytest.approx helper
class ApproxBase(object):
"""
Provide shared utilities for making approximate comparisons between numbers
or sequences of numbers.
"""
def __init__(self, expected, rel=None, abs=None, nan_ok=False):
self.expected = expected
self.abs = abs
self.rel = rel
self.nan_ok = nan_ok
def __repr__(self):
raise NotImplementedError
def __eq__(self, actual):
return all(
a == self._approx_scalar(x)
for a, x in self._yield_comparisons(actual))
__hash__ = None
def __ne__(self, actual):
return not (actual == self)
if sys.version_info[0] == 2:
__cmp__ = _cmp_raises_type_error
def _approx_scalar(self, x):
return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
def _yield_comparisons(self, actual):
"""
Yield all the pairs of numbers to be compared. This is used to
implement the `__eq__` method.
"""
raise NotImplementedError
class ApproxNumpy(ApproxBase):
"""
Perform approximate comparisons for numpy arrays.
"""
# Tell numpy to use our `__eq__` operator instead of its.
__array_priority__ = 100
def __repr__(self):
# It might be nice to rewrite this function to account for the
# shape of the array...
return "approx({0!r})".format(list(
self._approx_scalar(x) for x in self.expected))
if sys.version_info[0] == 2:
__cmp__ = _cmp_raises_type_error
def __eq__(self, actual):
import numpy as np
try:
actual = np.asarray(actual)
except: # noqa
raise TypeError("cannot compare '{0}' to numpy.ndarray".format(actual))
if actual.shape != self.expected.shape:
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
import numpy as np
# We can be sure that `actual` is a numpy array, because it's
# casted in `__eq__` before being passed to `ApproxBase.__eq__`,
# which is the only method that calls this one.
for i in np.ndindex(self.expected.shape):
yield actual[i], self.expected[i]
class ApproxMapping(ApproxBase):
"""
Perform approximate comparisons for mappings where the values are numbers
(the keys can be anything).
"""
def __repr__(self):
return "approx({0!r})".format(dict(
(k, self._approx_scalar(v))
for k, v in self.expected.items()))
def __eq__(self, actual):
if set(actual.keys()) != set(self.expected.keys()):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
for k in self.expected.keys():
yield actual[k], self.expected[k]
class ApproxSequence(ApproxBase):
"""
Perform approximate comparisons for sequences of numbers.
"""
# Tell numpy to use our `__eq__` operator instead of its.
__array_priority__ = 100
def __repr__(self):
seq_type = type(self.expected)
if seq_type not in (tuple, list, set):
seq_type = list
return "approx({0!r})".format(seq_type(
self._approx_scalar(x) for x in self.expected))
def __eq__(self, actual):
if len(actual) != len(self.expected):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
return zip(actual, self.expected)
class ApproxScalar(ApproxBase):
"""
Perform approximate comparisons for single numbers only.
"""
DEFAULT_ABSOLUTE_TOLERANCE = 1e-12
DEFAULT_RELATIVE_TOLERANCE = 1e-6
def __repr__(self):
"""
Return a string communicating both the expected value and the tolerance
for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode
plus/minus symbol if this is python3 (it's too hard to get right for
python2).
"""
if isinstance(self.expected, complex):
return str(self.expected)
# Infinities aren't compared using tolerances, so don't show a
# tolerance.
if math.isinf(self.expected):
return str(self.expected)
# If a sensible tolerance can't be calculated, self.tolerance will
# raise a ValueError. In this case, display '???'.
try:
vetted_tolerance = '{:.1e}'.format(self.tolerance)
except ValueError:
vetted_tolerance = '???'
if sys.version_info[0] == 2:
return '{0} +- {1}'.format(self.expected, vetted_tolerance)
else:
return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)
def __eq__(self, actual):
"""
Return true if the given value is equal to the expected value within
the pre-specified tolerance.
"""
# Short-circuit exact equality.
if actual == self.expected:
return True
# Allow the user to control whether NaNs are considered equal to each
# other or not. The abs() calls are for compatibility with complex
# numbers.
if math.isnan(abs(self.expected)):
return self.nan_ok and math.isnan(abs(actual))
# Infinity shouldn't be approximately equal to anything but itself, but
# if there's a relative tolerance, it will be infinite and infinity
# will seem approximately equal to everything. The equal-to-itself
# case would have been short circuited above, so here we can just
# return false if the expected value is infinite. The abs() call is
# for compatibility with complex numbers.
if math.isinf(abs(self.expected)):
return False
# Return true if the two numbers are within the tolerance.
return abs(self.expected - actual) <= self.tolerance
__hash__ = None
@property
def tolerance(self):
"""
Return the tolerance for the comparison. This could be either an
absolute tolerance or a relative tolerance, depending on what the user
specified or which would be larger.
"""
def set_default(x, default):
return x if x is not None else default
# Figure out what the absolute tolerance should be. ``self.abs`` is
# either None or a value specified by the user.
absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
if absolute_tolerance < 0:
raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance))
if math.isnan(absolute_tolerance):
raise ValueError("absolute tolerance can't be NaN.")
# If the user specified an absolute tolerance but not a relative one,
# just return the absolute tolerance.
if self.rel is None:
if self.abs is not None:
return absolute_tolerance
# Figure out what the relative tolerance should be. ``self.rel`` is
# either None or a value specified by the user. This is done after
# we've made sure the user didn't ask for an absolute tolerance only,
# because we don't want to raise errors about the relative tolerance if
# we aren't even going to use it.
relative_tolerance = set_default(self.rel, self.DEFAULT_RELATIVE_TOLERANCE) * abs(self.expected)
if relative_tolerance < 0:
raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance))
if math.isnan(relative_tolerance):
raise ValueError("relative tolerance can't be NaN.")
# Return the larger of the relative and absolute tolerances.
return max(relative_tolerance, absolute_tolerance)
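# Illustrative worked example: with the defaults above, the effective
# tolerance for a few expected values is
#   approx(2.5).tolerance            -> max(1e-6 * 2.5, 1e-12) == 2.5e-6
#   approx(2.5, rel=1e-3).tolerance  -> max(1e-3 * 2.5, 1e-12) == 2.5e-3
#   approx(2.5, abs=1e-2).tolerance  -> 1e-2  (abs given, rel omitted)
#   approx(0.0).tolerance            -> 1e-12 (the relative part vanishes at 0)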
class ApproxDecimal(ApproxScalar):
from decimal import Decimal
DEFAULT_ABSOLUTE_TOLERANCE = Decimal('1e-12')
DEFAULT_RELATIVE_TOLERANCE = Decimal('1e-6')
def approx(expected, rel=None, abs=None, nan_ok=False):
"""
Assert that two numbers (or two sets of numbers) are equal to each other
within some tolerance.
Due to the `intricacies of floating-point arithmetic`__, numbers that we
would intuitively expect to be equal are not always so::
>>> 0.1 + 0.2 == 0.3
False
__ https://docs.python.org/3/tutorial/floatingpoint.html
This problem is commonly encountered when writing tests, e.g. when making
sure that floating-point values are what you expect them to be. One way to
deal with this problem is to assert that two floating-point numbers are
equal to within some appropriate tolerance::
>>> abs((0.1 + 0.2) - 0.3) < 1e-6
True
However, comparisons like this are tedious to write and difficult to
understand. Furthermore, absolute comparisons like the one above are
usually discouraged because there's no tolerance that works well for all
situations. ``1e-6`` is good for numbers around ``1``, but too small for
very big numbers and too big for very small ones. It's better to express
the tolerance as a fraction of the expected value, but relative comparisons
like that are even more difficult to write correctly and concisely.
The ``approx`` class performs floating-point comparisons using a syntax
that's as intuitive as possible::
>>> from pytest import approx
>>> 0.1 + 0.2 == approx(0.3)
True
The same syntax also works for sequences of numbers::
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
True
Dictionary *values*::
>>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
True
And ``numpy`` arrays::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
True
By default, ``approx`` considers numbers within a relative tolerance of
``1e-6`` (i.e. one part in a million) of its expected value to be equal.
This treatment would lead to surprising results if the expected value was
``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
To handle this case less surprisingly, ``approx`` also considers numbers
within an absolute tolerance of ``1e-12`` of its expected value to be
equal. Infinity and NaN are special cases. Infinity is only considered
equal to itself, regardless of the relative tolerance. NaN is not
considered equal to anything by default, but you can make it be equal to
itself by setting the ``nan_ok`` argument to True. (This is meant to
facilitate comparing arrays that use NaN to mean "no data".)
Both the relative and absolute tolerances can be changed by passing
arguments to the ``approx`` constructor::
>>> 1.0001 == approx(1)
False
>>> 1.0001 == approx(1, rel=1e-3)
True
>>> 1.0001 == approx(1, abs=1e-3)
True
If you specify ``abs`` but not ``rel``, the comparison will not consider
the relative tolerance at all. In other words, two numbers that are within
the default relative tolerance of ``1e-6`` will still be considered unequal
if they exceed the specified absolute tolerance. If you specify both
``abs`` and ``rel``, the numbers will be considered equal if either
tolerance is met::
>>> 1 + 1e-8 == approx(1)
True
>>> 1 + 1e-8 == approx(1, abs=1e-12)
False
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
If you're thinking about using ``approx``, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
agree for the most part, but they do have meaningful differences:
- ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
tolerance is met. Because the relative tolerance is calculated w.r.t.
both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
``b`` is a "reference value"). You have to specify an absolute tolerance
if you want to compare to ``0.0`` because there is no tolerance by
default. Only available in python>=3.5. `More information...`__
__ https://docs.python.org/3/library/math.html#math.isclose
- ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
between ``a`` and ``b`` is less than the sum of the relative tolerance
w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
is only calculated w.r.t. ``b``, this test is asymmetric and you can
think of ``b`` as the reference value. Support for comparing sequences
is provided by ``numpy.allclose``. `More information...`__
__ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
- ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
are within an absolute tolerance of ``1e-7``. No relative tolerance is
considered and the absolute tolerance cannot be changed, so this function
is not appropriate for very large or very small numbers. Also, it's only
available in subclasses of ``unittest.TestCase`` and it's ugly because it
doesn't follow PEP8. `More information...`__
__ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
- ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
Because the relative tolerance is only calculated w.r.t. ``b``, this test
is asymmetric and you can think of ``b`` as the reference value. In the
special case that you explicitly specify an absolute tolerance but not a
relative tolerance, only the absolute tolerance is considered.
.. warning::
.. versionchanged:: 3.2
In order to avoid inconsistent behavior, ``TypeError`` is
raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
The example below illustrates the problem::
assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used for
the comparison. This is because the call hierarchy of rich comparisons
follows a fixed behavior. `More information...`__
__ https://docs.python.org/3/reference/datamodel.html#object.__ge__
"""
from collections import Mapping, Sequence
from _pytest.compat import STRING_TYPES as String
from decimal import Decimal
# Delegate the comparison to a class that knows how to deal with the type
# of the expected value (e.g. int, float, list, dict, numpy.array, etc).
#
# This architecture is really driven by the need to support numpy arrays.
# The only way to override `==` for arrays without requiring that approx be
# the left operand is to inherit the approx object from `numpy.ndarray`.
# But that can't be a general solution, because it requires (1) numpy to be
# installed and (2) the expected value to be a numpy array. So the general
# solution is to delegate each type of expected value to a different class.
#
# This has the advantage that it made it easy to support mapping types
# (i.e. dict). The old code accepted mapping types, but would only compare
# their keys, which is probably not what most people would expect.
if _is_numpy_array(expected):
cls = ApproxNumpy
elif isinstance(expected, Mapping):
cls = ApproxMapping
elif isinstance(expected, Sequence) and not isinstance(expected, String):
cls = ApproxSequence
elif isinstance(expected, Decimal):
cls = ApproxDecimal
else:
cls = ApproxScalar
return cls(expected, rel, abs, nan_ok)
def _is_numpy_array(obj):
"""
Return true if the given object is a numpy array. Make a special effort to
avoid importing numpy unless it's really necessary.
"""
import inspect
for cls in inspect.getmro(type(obj)):
if cls.__module__ == 'numpy':
try:
import numpy as np
return isinstance(obj, np.ndarray)
except ImportError:
pass
return False
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
"""
Assert that a code block/function call raises ``expected_exception``
and raise a failure exception otherwise.
:arg message: if specified, provides a custom failure message if the
exception is not raised
:arg match: if specified, asserts that the exception matches a text or regex
This helper produces a ``ExceptionInfo()`` object (see below).
You may use this function as a context manager::
>>> with raises(ZeroDivisionError):
... 1/0
.. versionchanged:: 2.10
In the context manager form you may use the keyword argument
``message`` to specify a custom failure message::
>>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
... pass
Traceback (most recent call last):
...
Failed: Expecting ZeroDivisionError
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> value = 15
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type == ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type == ValueError
Since version ``3.1`` you can use the keyword argument ``match`` to assert that the
exception matches a text or regex::
>>> with raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
**Legacy forms**
The forms below are fully supported but are discouraged for new code because the
context manager form is regarded as more readable and less error-prone.
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
It is also possible to pass a string to be evaluated at runtime::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
The string will be evaluated using the same ``locals()`` and ``globals()``
at the moment of the ``raises`` call.
.. currentmodule:: _pytest._code
Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`.
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
message = "DID NOT RAISE {0}".format(expected_exception)
match_expr = None
if not args:
if "message" in kwargs:
message = kwargs.pop("message")
if "match" in kwargs:
match_expr = kwargs.pop("match")
return RaisesContext(expected_exception, message, match_expr)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
# print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
# XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo()
fail(message)
raises.Exception = fail.Exception
class RaisesContext(object):
def __init__(self, expected_exception, message, match_expr):
self.expected_exception = expected_exception
self.message = message
self.match_expr = match_expr
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
fail(self.message)
self.excinfo.__init__(tp)
suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
if sys.version_info[0] == 2 and suppress_exception:
sys.exc_clear()
if self.match_expr and suppress_exception:
self.excinfo.match(self.match_expr)
return suppress_exception
| mit | 8,165,388,998,812,500,000 | 36.24252 | 104 | 0.628737 | false |
xuru/pyvisdk | pyvisdk/do/guest_windows_file_attributes.py | 1 | 1096 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def GuestWindowsFileAttributes(vim, *args, **kwargs):
'''Different attributes for a Windows guest file.'''
obj = vim.client.factory.create('ns0:GuestWindowsFileAttributes')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'createTime', 'hidden', 'readOnly', 'accessTime', 'modificationTime',
'symlinkTarget', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
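# Illustrative usage sketch (assumes `vim` is an already-connected pyvisdk
# service instance; the attribute values are placeholders):
#   attrs = GuestWindowsFileAttributes(vim, hidden=True, readOnly=False)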
| mit | -5,001,963,359,529,851,000 | 31.264706 | 124 | 0.60219 | false |
SalesforceFoundation/mrbelvedereci | metaci/contrib/sites/migrations/0003_set_site_domain_and_name.py | 1 | 1044 | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={"domain": "metaci.herokuapp.com", "name": "metaci"},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
| bsd-3-clause | 6,772,709,289,891,087,000 | 28.828571 | 129 | 0.685824 | false |
rossant/galry | galry/glrenderer.py | 1 | 60655 | try:
import OpenGL.GL as gl
except:
from galry import log_warn
log_warn(("PyOpenGL is not available and Galry won't be"
" able to render plots."))
class _gl(object):
def mock(*args, **kwargs):
return None
def __getattr__(self, name):
return self.mock
gl = _gl()
from collections import OrderedDict
import numpy as np
import sys
from galry import enforce_dtype, DataNormalizer, log_info, log_debug, \
log_warn, RefVar
__all__ = ['GLVersion', 'GLRenderer']
# GLVersion class
# ---------------
class GLVersion(object):
"""Methods related to the GL version."""
# self.version_header = '#version 120'
# self.precision_header = 'precision mediump float;'
@staticmethod
def get_renderer_info():
"""Return information about the client renderer.
Returns:
* info: a dictionary with the following keys:
* renderer_name
* opengl_version
* glsl_version
"""
return {
'renderer_name': gl.glGetString(gl.GL_RENDERER),
'opengl_version': gl.glGetString(gl.GL_VERSION),
'glsl_version': gl.glGetString(gl.GL_SHADING_LANGUAGE_VERSION)
}
@staticmethod
def version_header():
if GLVersion.get_renderer_info()['opengl_version'][0:3] < '2.1':
return '#version 110\n'
else:
return '#version 120\n'
@staticmethod
def precision_header():
if GLVersion.get_renderer_info()['glsl_version'] >= '1.3':
return 'precision mediump float;'
else:
return ''
# Low-level OpenGL functions to initialize/load variables
# -------------------------------------------------------
class Attribute(object):
"""Contains OpenGL functions related to attributes."""
@staticmethod
def create():
"""Create a new buffer and return a `buffer` index."""
return gl.glGenBuffers(1)
@staticmethod
def get_gltype(index=False):
if not index:
return gl.GL_ARRAY_BUFFER
else:
return gl.GL_ELEMENT_ARRAY_BUFFER
@staticmethod
def bind(buffer, location=None, index=False):
"""Bind a buffer and associate a given location."""
gltype = Attribute.get_gltype(index)
gl.glBindBuffer(gltype, buffer)
if location >= 0:
gl.glEnableVertexAttribArray(location)
@staticmethod
def set_attribute(location, ndim):
"""Specify the type of the attribute before rendering."""
gl.glVertexAttribPointer(location, ndim, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
@staticmethod
def convert_data(data, index=False):
"""Force 32-bit floating point numbers for data."""
if not index:
return enforce_dtype(data, np.float32)
else:
return np.array(data, np.int32)
@staticmethod
def load(data, index=False):
"""Load data in the buffer for the first time. The buffer must
have been bound before."""
data = Attribute.convert_data(data, index=index)
gltype = Attribute.get_gltype(index)
gl.glBufferData(gltype, data, gl.GL_DYNAMIC_DRAW)
@staticmethod
def update(data, onset=0, index=False):
"""Update data in the currently bound buffer."""
gltype = Attribute.get_gltype(index)
data = Attribute.convert_data(data, index=index)
# convert onset into bytes count
if data.ndim == 1:
ndim = 1
elif data.ndim == 2:
ndim = data.shape[1]
onset *= ndim * data.itemsize
gl.glBufferSubData(gltype, int(onset), data)
@staticmethod
def delete(*buffers):
"""Delete buffers."""
if buffers:
gl.glDeleteBuffers(len(buffers), buffers)
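# Illustrative usage sketch for the Attribute wrapper (assumes a current
# OpenGL context; the data array and the shader location 0 are placeholders):
#   data = np.zeros((100, 2), dtype=np.float32)
#   buf = Attribute.create()
#   Attribute.bind(buf, 0)
#   Attribute.load(data)
#   # update rows 10..19 in place; `update` turns the row onset into a byte
#   # offset (onset * ndim * itemsize = 10 * 2 * 4 = 80 bytes here)
#   Attribute.update(data[10:20], onset=10)
#   Attribute.delete(buf)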
class Uniform(object):
"""Contains OpenGL functions related to uniforms."""
float_suffix = {True: 'f', False: 'i'}
array_suffix = {True: 'v', False: ''}
# glUniform[Matrix]D[f][v]
@staticmethod
def convert_data(data):
if isinstance(data, np.ndarray):
data = enforce_dtype(data, np.float32)
if type(data) == np.float64:
data = np.float32(data)
if type(data) == np.int64:
data = np.int32(data)
if type(data) == list:
data = map(Uniform.convert_data, data)
if type(data) == tuple:
data = tuple(map(Uniform.convert_data, data))
return data
@staticmethod
def load_scalar(location, data):
data = Uniform.convert_data(data)
is_float = (type(data) == float) or (type(data) == np.float32)
funname = 'glUniform1%s' % Uniform.float_suffix[is_float]
getattr(gl, funname)(location, data)
@staticmethod
def load_vector(location, data):
if len(data) > 0:
data = Uniform.convert_data(data)
is_float = (type(data[0]) == float) or (type(data[0]) == np.float32)
ndim = len(data)
funname = 'glUniform%d%s' % (ndim, Uniform.float_suffix[is_float])
getattr(gl, funname)(location, *data)
@staticmethod
def load_array(location, data):
data = Uniform.convert_data(data)
is_float = (data.dtype == np.float32)
size, ndim = data.shape
funname = 'glUniform%d%sv' % (ndim, Uniform.float_suffix[is_float])
getattr(gl, funname)(location, size, data)
@staticmethod
def load_matrix(location, data):
data = Uniform.convert_data(data)
is_float = (data.dtype == np.float32)
n, m = data.shape
# TODO: arrays of matrices?
if n == m:
funname = 'glUniformMatrix%d%sv' % (n, Uniform.float_suffix[is_float])
else:
funname = 'glUniformMatrix%dx%d%sv' % (n, m, Uniform.float_suffix[is_float])
getattr(gl, funname)(location, 1, False, data)
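# Illustrative examples of how the glUniform* call name is built (`loc`
# stands for a uniform location obtained from the shader program):
#   Uniform.load_scalar(loc, 0.5)                           -> glUniform1f
#   Uniform.load_vector(loc, (1., 0., 0.))                  -> glUniform3f
#   Uniform.load_array(loc, np.zeros((16, 4), np.float32))  -> glUniform4fv
#   Uniform.load_matrix(loc, np.eye(4, dtype=np.float32))   -> glUniformMatrix4fv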
class Texture(object):
"""Contains OpenGL functions related to textures."""
@staticmethod
def create(ndim=2, mipmap=False, minfilter=None, magfilter=None):
"""Create a texture with the specifyed number of dimensions."""
buffer = gl.glGenTextures(1)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
Texture.bind(buffer, ndim)
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
gl.glTexParameteri(textype, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP)
gl.glTexParameteri(textype, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP)
if mipmap:
if hasattr(gl, 'glGenerateMipmap'):
gl.glGenerateMipmap(textype)
else:
minfilter = 'NEAREST'
magfilter = 'NEAREST'
if minfilter is None:
minfilter = 'NEAREST'
if magfilter is None:
magfilter = 'NEAREST'
minfilter = getattr(gl, 'GL_' + minfilter)
magfilter = getattr(gl, 'GL_' + magfilter)
gl.glTexParameteri(textype, gl.GL_TEXTURE_MIN_FILTER, minfilter)
gl.glTexParameteri(textype, gl.GL_TEXTURE_MAG_FILTER, magfilter)
return buffer
@staticmethod
def bind(buffer, ndim):
"""Bind a texture buffer."""
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
gl.glBindTexture(textype, buffer)
@staticmethod
def get_info(data):
"""Return information about texture data."""
# find shape, ndim, ncomponents
shape = data.shape
if shape[0] == 1:
ndim = 1
elif shape[0] > 1:
ndim = 2
# ndim = 2
ncomponents = shape[2]
# ncomponents==1 ==> GL_R, 3 ==> GL_RGB, 4 ==> GL_RGBA
component_type = getattr(gl, ["GL_INTENSITY8", None, "GL_RGB", "GL_RGBA"] \
[ncomponents - 1])
return ndim, ncomponents, component_type
@staticmethod
def convert_data(data):
"""convert data in a array of uint8 in [0, 255]."""
if data.dtype == np.float32 or data.dtype == np.float64:
return np.array(255 * data, dtype=np.uint8)
elif data.dtype == np.uint8:
return data
else:
raise ValueError("The texture is in an unsupported format.")
@staticmethod
def copy(fbo, tex_src, tex_dst, width, height):
# /// bind the FBO
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)
# /// attach the source texture to the fbo
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0,
gl.GL_TEXTURE_2D, tex_src, 0)
# /// bind the destination texture
gl.glBindTexture(gl.GL_TEXTURE_2D, tex_dst)
# /// copy from framebuffer (here, the FBO!) to the bound texture
gl.glCopyTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height)
# /// unbind the FBO
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
# # ncomponents==1 ==> GL_R, 3 ==> GL_RGB, 4 ==> GL_RGBA
# component_type = getattr(gl, ["GL_INTENSITY8", None, "GL_RGB", "GL_RGBA"] \
# [ncomponents - 1])
# gl.glCopyTexImage2D(gl.GL_TEXTURE_2D,
# 0, # level
# component_type,
# 0, 0, # x, y offsets
# 0, 0, # x, y
# w, h, # width, height
# 0 # border
# )
# @staticmethod
# def read_buffer(index=0):
# gl.glReadBuffer(getattr(gl, 'GL_COLOR_ATTACHMENT%d' % index))
# @staticmethod
# def draw_buffer():
# gl.glDrawBuffer(gl.GL_FRONT)
@staticmethod
def load(data):
"""Load texture data in a bound texture buffer."""
# convert data in a array of uint8 in [0, 255]
data = Texture.convert_data(data)
shape = data.shape
# get texture info
ndim, ncomponents, component_type = Texture.get_info(data)
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
# print ndim, shape, data.shape
# load data in the buffer
if ndim == 1:
gl.glTexImage1D(textype, 0, component_type, shape[1], 0, component_type,
gl.GL_UNSIGNED_BYTE, data)
elif ndim == 2:
# width, height == shape[1], shape[0]: Thanks to the Confusion Club
gl.glTexImage2D(textype, 0, component_type, shape[1], shape[0], 0,
component_type, gl.GL_UNSIGNED_BYTE, data)
@staticmethod
def update(data):
"""Update a texture."""
# convert data in a array of uint8 in [0, 255]
data = Texture.convert_data(data)
shape = data.shape
# get texture info
ndim, ncomponents, component_type = Texture.get_info(data)
textype = getattr(gl, "GL_TEXTURE_%dD" % ndim)
# update buffer
if ndim == 1:
gl.glTexSubImage1D(textype, 0, 0, shape[1],
component_type, gl.GL_UNSIGNED_BYTE, data)
elif ndim == 2:
gl.glTexSubImage2D(textype, 0, 0, 0, shape[1], shape[0],
component_type, gl.GL_UNSIGNED_BYTE, data)
@staticmethod
def delete(*buffers):
"""Delete texture buffers."""
gl.glDeleteTextures(buffers)
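# Illustrative usage sketch for the Texture wrapper (assumes a current OpenGL
# context; the RGBA image is a placeholder):
#   image = np.zeros((256, 256, 4), dtype=np.uint8)  # (height, width, RGBA)
#   buf = Texture.create(ndim=2, minfilter='LINEAR', magfilter='LINEAR')
#   Texture.bind(buf, 2)
#   Texture.load(image)    # get_info() maps the 4 components to GL_RGBA
#   Texture.update(image)  # later re-uploads via glTexSubImage2D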
class FrameBuffer(object):
"""Contains OpenGL functions related to FBO."""
@staticmethod
def create():
"""Create a FBO."""
if hasattr(gl, 'glGenFramebuffers') and gl.glGenFramebuffers:
buffer = gl.glGenFramebuffers(1)
else:
buffer = None
return buffer
@staticmethod
def bind(buffer=None):
"""Bind a FBO."""
if buffer is None:
buffer = 0
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, buffer)
@staticmethod
def bind_texture(texture, i=0):
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER,
getattr(gl, 'GL_COLOR_ATTACHMENT%d' % i),
gl.GL_TEXTURE_2D, texture, 0)
@staticmethod
def draw_buffers(n):
gl.glDrawBuffers([getattr(gl, 'GL_COLOR_ATTACHMENT%d' % i) for i in xrange(n)])
@staticmethod
def unbind():
"""Unbind a FBO."""
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
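# Illustrative render-to-texture sketch with this wrapper (`tex` stands for a
# texture buffer previously created with Texture.create()):
#   fbo = FrameBuffer.create()
#   FrameBuffer.bind(fbo)
#   FrameBuffer.bind_texture(tex, 0)  # attach as GL_COLOR_ATTACHMENT0
#   FrameBuffer.draw_buffers(1)
#   # ... render the scene ...
#   FrameBuffer.unbind()
# initialize_framebuffer() below follows the same sequence for 'framebuffer'
# variables.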
# Shader manager
# --------------
class ShaderManager(object):
"""Handle vertex and fragment shaders.
TODO: integrate in the renderer the shader code creation module.
"""
# Initialization methods
# ----------------------
def __init__(self, vertex_shader, fragment_shader):
"""Compile shaders and create a program."""
# add headers
vertex_shader = GLVersion.version_header() + vertex_shader
fragment_shader = GLVersion.version_header() + fragment_shader
# set shader source
self.vertex_shader = vertex_shader
self.fragment_shader = fragment_shader
# compile shaders
self.compile()
# create program
self.program = self.create_program()
def compile_shader(self, source, shader_type):
"""Compile a shader (vertex or fragment shader).
Arguments:
* source: the shader source code as a string.
* shader_type: either gl.GL_VERTEX_SHADER or gl.GL_FRAGMENT_SHADER.
"""
# compile shader
shader = gl.glCreateShader(shader_type)
gl.glShaderSource(shader, source)
gl.glCompileShader(shader)
result = gl.glGetShaderiv(shader, gl.GL_COMPILE_STATUS)
infolog = gl.glGetShaderInfoLog(shader)
if infolog:
infolog = "\n" + infolog.strip()
# check compilation error
if not(result) and infolog:
msg = "Compilation error for %s." % str(shader_type)
if infolog is not None:
msg += infolog
msg += source
raise RuntimeError(msg)
else:
log_debug("Compilation succeeded for %s.%s" % (str(shader_type), infolog))
return shader
def compile(self):
"""Compile the shaders."""
# print self.vertex_shader
# print self.fragment_shader
self.vs = self.compile_shader(self.vertex_shader, gl.GL_VERTEX_SHADER)
self.fs = self.compile_shader(self.fragment_shader, gl.GL_FRAGMENT_SHADER)
def create_program(self):
"""Create shader program and attach shaders."""
program = gl.glCreateProgram()
gl.glAttachShader(program, self.vs)
gl.glAttachShader(program, self.fs)
gl.glLinkProgram(program)
result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)
# check linking error
if not(result):
msg = "Shader program linking error:"
info = gl.glGetProgramInfoLog(program)
if info:
msg += info
raise RuntimeError(msg)
self.program = program
return program
def get_attribute_location(self, name):
"""Return the location of an attribute after the shaders have compiled."""
return gl.glGetAttribLocation(self.program, name)
def get_uniform_location(self, name):
"""Return the location of a uniform after the shaders have compiled."""
return gl.glGetUniformLocation(self.program, name)
# Activation methods
# ------------------
def activate_shaders(self):
"""Activate shaders for the rest of the rendering call."""
# try:
gl.glUseProgram(self.program)
# return True
# except Exception as e:
# log_info("Error while activating the shaders: " + e.message)
# return False
def deactivate_shaders(self):
"""Deactivate shaders for the rest of the rendering call."""
# try:
gl.glUseProgram(0)
# return True
# except Exception as e:
# log_info("Error while activating the shaders: " + e.message)
# return True
# Cleanup methods
# ---------------
def detach_shaders(self):
"""Detach shaders from the program."""
if gl.glIsProgram(self.program):
gl.glDetachShader(self.program, self.vs)
gl.glDetachShader(self.program, self.fs)
def delete_shaders(self):
"""Delete the vertex and fragment shaders."""
if gl.glIsProgram(self.program):
gl.glDeleteShader(self.vs)
gl.glDeleteShader(self.fs)
def delete_program(self):
"""Delete the shader program."""
if gl.glIsProgram(self.program):
gl.glDeleteProgram(self.program)
def cleanup(self):
"""Clean up all shaders."""
self.detach_shaders()
self.delete_shaders()
self.delete_program()
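# Minimal usage sketch for ShaderManager (the GLSL sources are placeholders
# and a current OpenGL context is assumed):
#   vs = "attribute vec2 position; void main() { gl_Position = vec4(position, 0., 1.); }"
#   fs = "void main() { gl_FragColor = vec4(1.); }"
#   sm = ShaderManager(vs, fs)   # compiles, links and stores sm.program
#   loc = sm.get_attribute_location('position')
#   sm.activate_shaders()        # glUseProgram(sm.program)
#   # ... issue draw calls ...
#   sm.deactivate_shaders()
#   sm.cleanup()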
# Slicing classes
# ---------------
MAX_VBO_SIZE = 65000
class Slicer(object):
"""Handle attribute slicing, necessary because of the size
of buffer objects which is limited on some GPUs."""
@staticmethod
def _get_slices(size, maxsize=None):
"""Return a list of slices for a given dataset size.
Arguments:
* size: the size of the dataset, i.e. the number of points.
* maxsize: the maximum number of points per slice (defaults to
MAX_VBO_SIZE).
Returns:
* slices: a list of pairs `(position, slice_size)` where `position`
is the position of this slice in the original buffer, and
`slice_size` the slice size.
"""
if maxsize is None:
maxsize = MAX_VBO_SIZE
if maxsize > 0:
nslices = int(np.ceil(size / float(maxsize)))
else:
nslices = 0
return [(i*maxsize, min(maxsize+1, size-i*maxsize)) for i in xrange(nslices)]
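# Illustrative worked example: with the default MAX_VBO_SIZE of 65000, a
# buffer of 150000 points is cut into three slices, consecutive slices
# overlapping by one vertex (the maxsize+1 above), presumably so that line
# primitives are not broken at slice boundaries:
#   Slicer._get_slices(150000)
#   -> [(0, 65001), (65000, 65001), (130000, 20000)]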
@staticmethod
def _slice_bounds(bounds, position, slice_size, regular=False):
"""Slice data bounds in a *single* slice according to the VBOs slicing.
Arguments:
* bounds: the bounds as specified by the user in `create_dataset`.
* position: the position of the current slice.
* slice_size: the size of the current slice.
Returns:
* bounds_sliced: the bounds for the current slice. It is a list an
1D array of integer indices.
"""
# first bound index after the sliced VBO: nothing to paint
if bounds[0] >= position + slice_size:
bounds_sliced = None
# last bound index before the sliced VBO: nothing to paint
elif bounds[-1] < position:
bounds_sliced = None
# the current sliced VBO intersects the bounds: something to paint
else:
bounds_sliced = bounds
if not regular:
# get the bounds that fall within the sliced VBO
ind = (bounds_sliced>=position) & (bounds_sliced<position + slice_size)
bounds_sliced = bounds_sliced[ind]
# HACK: more efficient algorithm when the bounds are regularly
# spaced
else:
d = float(regular)
p = position
b0 = bounds_sliced[0]
b1 = bounds_sliced[-1]
s = slice_size
i0 = max(0, int(np.ceil((p-b0)/d)))
i1 = max(0, int(np.floor((p+s-b0)/d)))
bounds_sliced = bounds_sliced[i0:i1+1].copy()
ind = ((b0 >= p) and (b0 < p+s), (b1 >= p) and (b1 < p+s))
"""
bounds_sliced = [b0 + d*i]
(p-b0)/d <= i0 < (p+s-b0)/d
i0 = ceil((p-b0)/d), i1 = floor((p+s-b0)/d)
ind = (bs[0] >= p & < p+s, bs[-1])
"""
# remove the onset (first index of the sliced VBO)
bounds_sliced -= position
# handle the case when the slice cuts between two bounds
if not ind[0]:
bounds_sliced = np.hstack((0, bounds_sliced))
if not ind[-1]:
bounds_sliced = np.hstack((bounds_sliced, slice_size))
return enforce_dtype(bounds_sliced, np.int32)
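# Illustrative worked example (non-regular path): restricting the bounds
# [0, 100, 200, 300] to the slice starting at position=150 with
# slice_size=100 gives
#   Slicer._slice_bounds(np.array([0, 100, 200, 300]), 150, 100)
#   -> array([0, 50, 100], dtype=int32)
# i.e. within this slice the primitive is cut at the rebased bound 50, and
# 0 and 100 are appended because the slice starts and ends between bounds.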
def set_size(self, size, doslice=True):
"""Update the total size of the buffer, and update
the slice information accordingly."""
# deactivate slicing by using a maxsize number larger than the
# actual size
if not doslice:
maxsize = 2 * size
else:
maxsize = None
self.size = size
# if not hasattr(self, 'bounds'):
# self.bounds = np.array([0, size], dtype=np.int32)
# compute the data slicing with respect to bounds (specified in the
# template) and to the maximum size of a VBO.
self.slices = self._get_slices(self.size, maxsize)
# print self.size, maxsize
# print self.slices
self.slice_count = len(self.slices)
def set_bounds(self, bounds=None):
"""Update the bound size, and update the slice information
accordingly."""
if bounds is None:
bounds = np.array([0, self.size], dtype=np.int32)
self.bounds = bounds
# is regular?
d = np.diff(bounds)
r = False
if len(d) > 0:
dm, dM = d.min(), d.max()
if dm == dM:
r = dm
# log_info("Regular bounds")
self.subdata_bounds = [self._slice_bounds(self.bounds, pos, size, r) \
for pos, size in self.slices]
class SlicedAttribute(object):
"""Encapsulate methods for slicing an attribute and handling several
buffer objects for a single attribute."""
def __init__(self, slicer, location, buffers=None):
self.slicer = slicer
self.location = location
if buffers is None:
# create the sliced buffers
self.create()
else:
log_debug("Creating sliced attribute with existing buffers " +
str(buffers))
# or use existing buffers
self.load_buffers(buffers)
def create(self):
"""Create the sliced buffers."""
self.buffers = [Attribute.create() for _ in self.slicer.slices]
def load_buffers(self, buffers):
"""Load existing buffers instead of creating new ones."""
self.buffers = buffers
def delete_buffers(self):
"""Delete all sub-buffers."""
# for buffer in self.buffers:
Attribute.delete(*self.buffers)
def load(self, data):
"""Load data on all sliced buffers."""
for buffer, (pos, size) in zip(self.buffers, self.slicer.slices):
# WARNING: putting self.location instead of None ==> SEGFAULT on Linux with Nvidia drivers
Attribute.bind(buffer, None)
Attribute.load(data[pos:pos + size,...])
def bind(self, slice=None):
if slice is None:
slice = 0
Attribute.bind(self.buffers[slice], self.location)
def update(self, data, mask=None):
"""Update data on all sliced buffers."""
# NOTE: the slicer needs to be updated if the size of the data changes
# default mask
if mask is None:
mask = np.ones(self.slicer.size, dtype=np.bool)
# is the current subVBO within the given [onset, offset]?
within = False
# update VBOs
for buffer, (pos, size) in zip(self.buffers, self.slicer.slices):
subdata = data[pos:pos + size,...]
submask = mask[pos:pos + size]
# if there is at least one True in the slice mask (submask)
if submask.any():
# this sub-buffer contains updated indices
subonset = submask.argmax()
suboffset = len(submask) - 1 - submask[::-1].argmax()
Attribute.bind(buffer, self.location)
Attribute.update(subdata[subonset:suboffset + 1,...], subonset)
# Painter class
# -------------
class Painter(object):
"""Provides low-level methods for calling OpenGL rendering commands."""
@staticmethod
def draw_arrays(primtype, offset, size):
"""Render an array of primitives."""
gl.glDrawArrays(primtype, offset, size)
@staticmethod
def draw_multi_arrays(primtype, bounds):
"""Render several arrays of primitives."""
first = bounds[:-1]
count = np.diff(bounds)
primcount = len(bounds) - 1
gl.glMultiDrawArrays(primtype, first, count, primcount)
@staticmethod
def draw_indexed_arrays(primtype, size):
gl.glDrawElements(primtype, size, gl.GL_UNSIGNED_INT, None)
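# Illustrative worked example: for bounds = np.array([0, 10, 25]),
# Painter.draw_multi_arrays(gl.GL_LINE_STRIP, bounds) calls
# glMultiDrawArrays with first=[0, 10], count=[10, 15], primcount=2,
# i.e. two independent line strips of 10 and 15 vertices.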
# Visual renderer
# ---------------
class GLVisualRenderer(object):
"""Handle rendering of one visual"""
def __init__(self, renderer, visual):
"""Initialize the visual renderer, create the slicer, initialize
all variables and the shaders."""
# register the master renderer (to access to other visual renderers)
# and register the scene dictionary
self.renderer = renderer
self.scene = renderer.scene
# register the visual dictionary
self.visual = visual
self.framebuffer = visual.get('framebuffer', None)
# self.beforeclear = visual.get('beforeclear', None)
# options
self.options = visual.get('options', {})
# hold all data changes until the next rendering pass happens
self.data_updating = {}
self.textures_to_copy = []
# set the primitive type from its name
self.set_primitive_type(self.visual['primitive_type'])
# indexed mode? set in initialize_variables
self.use_index = None
# whether to use slicing? always True except when indexing should not
# be used, but slicing neither
self.use_slice = True
# self.previous_size = None
# set the slicer
self.slicer = Slicer()
# used when slicing needs to be deactivated (like for indexed arrays)
self.noslicer = Slicer()
# get size and bounds
size = self.visual['size']
bounds = np.array(self.visual.get('bounds', [0, size]), np.int32)
# self.update_size(size, bounds)
self.slicer.set_size(size)
self.slicer.set_bounds(bounds)
self.noslicer.set_size(size, doslice=False)
self.noslicer.set_bounds(bounds)
# compile and link the shaders
self.shader_manager = ShaderManager(self.visual['vertex_shader'],
self.visual['fragment_shader'])
# DEBUG
# log_info(self.shader_manager.vertex_shader)
# log_info(self.shader_manager.fragment_shader)
# initialize all variables
# self.initialize_normalizers()
self.initialize_variables()
self.initialize_fbocopy()
self.load_variables()
def set_primitive_type(self, primtype):
"""Set the primitive type from its name (without the GL_ prefix)."""
self.primitive_type = getattr(gl, "GL_%s" % primtype.upper())
def getarg(self, name):
"""Get a visual parameter."""
return self.visual.get(name, None)
# Variable methods
# ----------------
def get_visuals(self):
"""Return all visuals defined in the scene."""
return self.scene['visuals']
def get_visual(self, name):
"""Return a visual dictionary from its name."""
visuals = [v for v in self.get_visuals() if v.get('name', '') == name]
if not visuals:
return None
return visuals[0]
def get_variables(self, shader_type=None):
"""Return all variables defined in the visual."""
if not shader_type:
return self.visual.get('variables', [])
else:
return [var for var in self.get_variables() \
if var['shader_type'] == shader_type]
def get_variable(self, name, visual=None):
"""Return a variable by its name, and for any given visual which
is specified by its name."""
# get the variables list
if visual is None:
variables = self.get_variables()
else:
variables = self.get_visual(visual)['variables']
variables = [v for v in variables if v.get('name', '') == name]
if not variables:
return None
return variables[0]
def resolve_reference(self, refvar):
"""Resolve a reference variable: return its true value (a Numpy array).
"""
return self.get_variable(refvar.variable, visual=refvar.visual)
# Initialization methods
# ----------------------
def initialize_fbocopy(self):
"""Create a FBO used when copying textures."""
self.fbocopy = FrameBuffer.create()
def initialize_variables(self):
"""Initialize all variables, after the shaders have compiled."""
# find out whether indexing is used or not, because in this case
# the slicing needs to be deactivated
if self.get_variables('index'):
# deactivate slicing
self.slicer = self.noslicer
log_debug("deactivating slicing because there's an indexed buffer")
self.use_index = True
else:
self.use_index = False
# initialize all variables
for var in self.get_variables():
shader_type = var['shader_type']
# skip varying
if shader_type == 'varying':
continue
name = var['name']
# call initialize_***(name) to initialize that variable
getattr(self, 'initialize_%s' % shader_type)(name)
# special case for uniforms: need to load them the first time
uniforms = self.get_variables('uniform')
self.set_data(**dict([(v['name'], v.get('data', None)) for v in uniforms]))
def initialize_attribute(self, name):
"""Initialize an attribute: get the shader location, create the
sliced buffers, and load the data."""
# retrieve the location of that attribute in the shader
location = self.shader_manager.get_attribute_location(name)
variable = self.get_variable(name)
variable['location'] = location
# deal with reference attributes: share the same buffers between
# several different visuals
if isinstance(variable.get('data', None), RefVar):
# HACK: if the targeted attribute is indexed, we should
# deactivate slicing here
if self.renderer.visual_renderers[variable['data'].visual].use_index:
log_debug("deactivating slicing")
self.slicer = self.noslicer
# use the existing buffers from the target variable
target = self.resolve_reference(variable['data'])
variable['sliced_attribute'] = SlicedAttribute(self.slicer, location,
buffers=target['sliced_attribute'].buffers)
else:
# initialize the sliced buffers
variable['sliced_attribute'] = SlicedAttribute(self.slicer, location)
def initialize_index(self, name):
variable = self.get_variable(name)
variable['buffer'] = Attribute.create()
def initialize_texture(self, name):
variable = self.get_variable(name)
# handle reference variable to texture
if isinstance(variable.get('data', None), RefVar):
target = self.resolve_reference(variable['data'])
variable['buffer'] = target['buffer']
variable['location'] = target['location']
else:
variable['buffer'] = Texture.create(variable['ndim'],
mipmap=variable.get('mipmap', None),
minfilter=variable.get('minfilter', None),
magfilter=variable.get('magfilter', None),
)
# NEW
# get the location of the sampler uniform
location = self.shader_manager.get_uniform_location(name)
variable['location'] = location
def initialize_framebuffer(self, name):
variable = self.get_variable(name)
variable['buffer'] = FrameBuffer.create()
# bind the frame buffer
FrameBuffer.bind(variable['buffer'])
# variable['texture'] is a list of texture names in the current visual
if isinstance(variable['texture'], basestring):
variable['texture'] = [variable['texture']]
# draw as many buffers as there are textures in that frame buffer
FrameBuffer.draw_buffers(len(variable['texture']))
for i, texname in enumerate(variable['texture']):
# get the texture variable:
texture = self.get_variable(texname)
# link the texture to the frame buffer
FrameBuffer.bind_texture(texture['buffer'], i)
# unbind the frame buffer
FrameBuffer.unbind()
def initialize_uniform(self, name):
"""Initialize an uniform: get the location after the shaders have
been compiled."""
location = self.shader_manager.get_uniform_location(name)
variable = self.get_variable(name)
variable['location'] = location
def initialize_compound(self, name):
pass
# Normalization methods
# ---------------------
# def initialize_normalizers(self):
# self.normalizers = {}
# Loading methods
# ---------------
def load_variables(self):
"""Load data for all variables at initialization."""
for var in self.get_variables():
shader_type = var['shader_type']
# skip uniforms
if shader_type == 'uniform' or shader_type == 'varying' or shader_type == 'framebuffer':
continue
# call load_***(name) to load that variable
getattr(self, 'load_%s' % shader_type)(var['name'])
def load_attribute(self, name, data=None):
"""Load data for an attribute variable."""
variable = self.get_variable(name)
if variable['sliced_attribute'].location < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
olddata = variable.get('data', None)
if isinstance(olddata, RefVar):
log_debug("Skipping loading data for attribute '%s' since it "
"references a target variable." % name)
return
if data is None:
data = olddata
if data is not None:
# normalization
# if name in self.options.get('normalizers', {}):
# viewbox = self.options['normalizers'][name]
# if viewbox:
# self.normalizers[name] = DataNormalizer(data)
# # normalize data with the specified viewbox, None by default
# # meaning that the natural bounds of the data are used.
# data = self.normalizers[name].normalize(viewbox)
variable['sliced_attribute'].load(data)
def load_index(self, name, data=None):
"""Load data for an index variable."""
variable = self.get_variable(name)
if data is None:
data = variable.get('data', None)
if data is not None:
self.indexsize = len(data)
Attribute.bind(variable['buffer'], index=True)
Attribute.load(data, index=True)
def load_texture(self, name, data=None):
"""Load data for a texture variable."""
variable = self.get_variable(name)
if variable['buffer'] < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
if data is None:
data = variable.get('data', None)
# NEW: update sampler location
self.update_samplers = True
if isinstance(data, RefVar):
log_debug("Skipping loading data for texture '%s' since it "
"references a target variable." % name)
return
if data is not None:
Texture.bind(variable['buffer'], variable['ndim'])
Texture.load(data)
def load_uniform(self, name, data=None):
"""Load data for an uniform variable."""
variable = self.get_variable(name)
location = variable['location']
if location < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
if data is None:
data = variable.get('data', None)
if data is not None:
ndim = variable['ndim']
size = variable.get('size', None)
# one value
if not size:
# scalar or vector
if type(ndim) == int or type(ndim) == long:
if ndim == 1:
Uniform.load_scalar(location, data)
else:
Uniform.load_vector(location, data)
# matrix
elif type(ndim) == tuple:
Uniform.load_matrix(location, data)
# array
else:
# scalar or vector
if type(ndim) == int or type(ndim) == long:
Uniform.load_array(location, data)
def load_compound(self, name, data=None):
pass
# Updating methods
# ----------------
def update_variable(self, name, data, **kwargs):
"""Update data of a variable."""
variable = self.get_variable(name)
if variable is None:
log_debug("Variable '%s' was not found, unable to update it." % name)
else:
shader_type = variable['shader_type']
# skip compound, which is handled in set_data
if shader_type == 'compound' or shader_type == 'varying' or shader_type == 'framebuffer':
pass
else:
getattr(self, 'update_%s' % shader_type)(name, data, **kwargs)
def update_attribute(self, name, data):#, bounds=None):
"""Update data for an attribute variable."""
variable = self.get_variable(name)
if variable['sliced_attribute'].location < 0:
log_debug(("Variable '%s' could not be updated, probably because "
"it is not used in the shaders") % name)
return
# handle reference variable
olddata = variable.get('data', None)
if isinstance(olddata, RefVar):
raise ValueError("Unable to load data for a reference " +
"attribute. Use the target variable directly.""")
variable['data'] = data
att = variable['sliced_attribute']
if olddata is None:
oldshape = 0
else:
oldshape = olddata.shape
# print name, oldshape, data.shape
# handle size changing
if data.shape[0] != oldshape[0]:
log_debug(("Creating new buffers for variable %s, old size=%s,"
"new size=%d") % (name, oldshape[0], data.shape[0]))
# update the size only when not using index arrays
if self.use_index:
newsize = self.slicer.size
else:
newsize = data.shape[0]
# update the slicer size and bounds
self.slicer.set_size(newsize, doslice=not(self.use_index))
# HACK: update the bounds only if there are no bounds basically
# (i.e. 2 bounds only), otherwise we assume the bounds have been
# changed explicitly
if len(self.slicer.bounds) == 2:
self.slicer.set_bounds()
# delete old buffers
att.delete_buffers()
# create new buffers
att.create()
# load data
att.load(data)
# forget previous size
# self.previous_size = None
else:
# update data
att.update(data)
def update_index(self, name, data):
"""Update data for a index variable."""
variable = self.get_variable(name)
prevsize = len(variable['data'])
variable['data'] = data
newsize = len(data)
# handle size changing
if newsize != prevsize:
# update the total size (in slicer)
# self.slicer.set_size(newsize, doslice=False)
self.indexsize = newsize
# delete old buffers
Attribute.delete(variable['buffer'])
# create new buffer
variable['buffer'] = Attribute.create()
# load data
Attribute.bind(variable['buffer'], index=True)
Attribute.load(data, index=True)
else:
# update data
Attribute.bind(variable['buffer'], index=True)
Attribute.update(data, index=True)
def update_texture(self, name, data):
"""Update data for a texture variable."""
variable = self.get_variable(name)
if variable['buffer'] < 0:
log_debug(("Variable '%s' could not be loaded, probably because "
"it is not used in the shaders") % name)
return
prevshape = variable['data'].shape
variable['data'] = data
# handle size changing
if data.shape != prevshape:
# delete old buffers
# Texture.delete(variable['buffer'])
variable['ndim'], variable['ncomponents'], _ = Texture.get_info(data)
# create new buffer
# variable['buffer'] = Texture.create(variable['ndim'],
# mipmap=variable.get('mipmap', None),
# minfilter=variable.get('minfilter', None),
# magfilter=variable.get('magfilter', None),)
# load data
Texture.bind(variable['buffer'], variable['ndim'])
Texture.load(data)
else:
# update data
Texture.bind(variable['buffer'], variable['ndim'])
Texture.update(data)
def update_uniform(self, name, data):
"""Update data for an uniform variable."""
variable = self.get_variable(name)
variable['data'] = data
# the uniform interface is the same for load/update
self.load_uniform(name, data)
special_keywords = ['visible',
'size',
'bounds',
'primitive_type',
'constrain_ratio',
'constrain_navigation',
]
def set_data(self, **kwargs):
"""Load data for the specified visual. Uploading does not happen here
but in `update_all_variables` instead, since this needs to happen
after shader program binding in the paint method.
Arguments:
* **kwargs: the data to update as name:value pairs. name can be
any field of the visual, plus one of the following keywords:
* visible: whether this visual should be visible,
* size: the size of the visual,
* primitive_type: the GL primitive type,
* constrain_ratio: whether to constrain the ratio of the visual,
* constrain_navigation: whether to constrain the navigation,
"""
# handle compound variables
kwargs2 = kwargs.copy()
for name, data in kwargs2.iteritems():
variable = self.get_variable(name)
if variable is None:
# log_info("variable '%s' unknown" % name)
continue
if variable is not None and variable['shader_type'] == 'compound':
fun = variable['fun']
kwargs.pop(name)
# HACK: if the target variable in the compound is a special
# keyword, we update it in kwargs, otherwise we update the
# data in self.data_updating
# print name, fun(data)
# if name in self.special_keywords:
# kwargs.update(**fun(data))
# else:
# self.data_updating.update(**fun(data))
kwargs.update(**fun(data))
# remove non-visible variables
if not variable.get('visible', True):
kwargs.pop(name)
# handle visual visibility
visible = kwargs.pop('visible', None)
if visible is not None:
self.visual['visible'] = visible
# handle size keyword
size = kwargs.pop('size', None)
# print size
if size is not None:
self.slicer.set_size(size)
# handle bounds keyword
bounds = kwargs.pop('bounds', None)
if bounds is not None:
self.slicer.set_bounds(bounds)
# handle primitive type special keyword
primitive_type = kwargs.pop('primitive_type', None)
if primitive_type is not None:
self.visual['primitive_type'] = primitive_type
self.set_primitive_type(primitive_type)
# handle constrain_ratio keyword
constrain_ratio = kwargs.pop('constrain_ratio', None)
if constrain_ratio is not None:
self.visual['constrain_ratio'] = constrain_ratio
# handle constrain_navigation keyword
constrain_navigation = kwargs.pop('constrain_navigation', None)
if constrain_navigation is not None:
self.visual['constrain_navigation'] = constrain_navigation
# flag the other variables as to be updated
self.data_updating.update(**kwargs)
def copy_texture(self, tex1, tex2):
self.textures_to_copy.append((tex1, tex2))
def update_all_variables(self):
"""Upload all new data that needs to be updated."""
# # current size, that may change following variable updating
# if not self.previous_size:
# self.previous_size = self.slicer.size
# go through all data changes
for name, data in self.data_updating.iteritems():
if data is not None:
# log_info("Updating variable '%s'" % name)
self.update_variable(name, data)
else:
log_debug("Data for variable '%s' is None" % name)
# reset the data updating dictionary
self.data_updating.clear()
def copy_all_textures(self):
# copy textures
for tex1, tex2 in self.textures_to_copy:
# tex1 = self.get_variable(tex1)
tex1 = self.resolve_reference(tex1)
tex2 = self.get_variable(tex2)
# tex2 = self.resolve_reference(tex2)
# # Texture.read_buffer()
# Texture.bind(tex2['buffer'], tex2['ndim'])
# copy(fbo, tex_src, tex_dst, width, height)
Texture.copy(self.fbocopy, tex1['buffer'], tex2['buffer'],
tex1['shape'][0], tex1['shape'][1])
self.textures_to_copy = []
# Binding methods
# ---------------
def bind_attributes(self, slice=None):
"""Bind all attributes of the visual for the given slice.
This method is used during rendering."""
# find all visual variables with shader type 'attribute'
attributes = self.get_variables('attribute')
# for each attribute, bind the sub buffer corresponding to the given
# slice
for variable in attributes:
loc = variable['location']
if loc < 0:
log_debug(("Unable to bind attribute '%s', probably because "
"it is not used in the shaders.") % variable['name'])
continue
variable['sliced_attribute'].bind(slice)
Attribute.set_attribute(loc, variable['ndim'])
def bind_indices(self):
indices = self.get_variables('index')
for variable in indices:
Attribute.bind(variable['buffer'], index=True)
def bind_textures(self):
"""Bind all textures of the visual.
This method is used during rendering."""
textures = self.get_variables('texture')
for i, variable in enumerate(textures):
buffer = variable.get('buffer', None)
if buffer is not None:
# HACK: we update the sampler values here
if self.update_samplers and not isinstance(variable['data'], RefVar):
Uniform.load_scalar(variable['location'], i)
# NEW
gl.glActiveTexture(getattr(gl, 'GL_TEXTURE%d' % i))
Texture.bind(buffer, variable['ndim'])
else:
log_debug("Texture '%s' was not properly initialized." % \
variable['name'])
# deactivate all textures if there are not textures
if not textures:
Texture.bind(0, 1)
Texture.bind(0, 2)
# no need to update the samplers after the first execution of this
# method
self.update_samplers = False
# Paint methods
# -------------
def paint(self):
"""Paint the visual slice by slice."""
# do not display non-visible visuals
if not self.visual.get('visible', True):
return
# activate the shaders
try:
self.shader_manager.activate_shaders()
# if the shaders could not be successfully activated, stop the
# rendering immediately
except Exception as e:
log_info("Error while activating the shaders: " + str(e))
return
# update all variables
self.update_all_variables()
# bind all textures for this visual
self.bind_textures()
# paint using indices
if self.use_index:
self.bind_attributes()
self.bind_indices()
Painter.draw_indexed_arrays(self.primitive_type, self.indexsize)
# or paint without
elif self.use_slice:
# draw all sliced buffers
for slice in xrange(len(self.slicer.slices)):
# get slice bounds
slice_bounds = self.slicer.subdata_bounds[slice]
# print slice, slice_bounds
# bind all attributes for that slice
self.bind_attributes(slice)
# call the appropriate OpenGL rendering command
# if len(self.slicer.bounds) <= 2:
# print "slice bounds", slice_bounds
if len(slice_bounds) <= 2:
Painter.draw_arrays(self.primitive_type, slice_bounds[0],
slice_bounds[1] - slice_bounds[0])
else:
Painter.draw_multi_arrays(self.primitive_type, slice_bounds)
self.copy_all_textures()
# deactivate the shaders
self.shader_manager.deactivate_shaders()
# Cleanup methods
# ---------------
def cleanup_attribute(self, name):
"""Cleanup a sliced attribute (all sub-buffers)."""
variable = self.get_variable(name)
variable['sliced_attribute'].delete_buffers()
def cleanup_texture(self, name):
"""Cleanup a texture."""
variable = self.get_variable(name)
Texture.delete(variable['buffer'])
def cleanup(self):
"""Clean up all variables."""
log_debug("Cleaning up all variables.")
for variable in self.get_variables():
shader_type = variable['shader_type']
if shader_type in ('attribute', 'texture'):
getattr(self, 'cleanup_%s' % shader_type)(variable['name'])
# clean up shaders
self.shader_manager.cleanup()
# Scene renderer
# --------------
class GLRenderer(object):
"""OpenGL renderer for a Scene.
This class takes a Scene object (dictionary) as an input, and
renders the scene. It provides methods to update the data in real-time.
"""
# Initialization
# --------------
def __init__(self, scene):
"""Initialize the renderer using the information on the scene.
Arguments:
* scene: a Scene dictionary with a `visuals` field containing
the list of visuals.
"""
self.scene = scene
self.viewport = (1., 1.)
self.visual_renderers = {}
def set_renderer_options(self):
"""Set the OpenGL options."""
options = self.scene.get('renderer_options', {})
# use vertex buffer object
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
# used for multisampling (antialiasing)
if options.get('antialiasing', None):
gl.glEnable(gl.GL_MULTISAMPLE)
# used for sprites
if options.get('sprites', True):
gl.glEnable(gl.GL_VERTEX_PROGRAM_POINT_SIZE)
gl.glEnable(gl.GL_POINT_SPRITE)
# enable transparency
if options.get('transparency', True):
gl.glEnable(gl.GL_BLEND)
blendfunc = options.get('transparency_blendfunc',
('SRC_ALPHA', 'ONE_MINUS_SRC_ALPHA')
# ('ONE_MINUS_DST_ALPHA', 'ONE')
)
blendfunc = [getattr(gl, 'GL_' + x) for x in blendfunc]
gl.glBlendFunc(*blendfunc)
# enable depth buffer, necessary for 3D rendering
if options.get('activate3D', None):
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glDepthMask(gl.GL_TRUE)
gl.glDepthFunc(gl.GL_LEQUAL)
gl.glDepthRange(0.0, 1.0)
# TODO: always enable??
gl.glClearDepth(1.0)
# Paint the background with the specified color (black by default)
background = options.get('background', (0, 0, 0, 0))
gl.glClearColor(*background)
def get_renderer_option(self, name):
return self.scene.get('renderer_options', {}).get(name, None)
# Visual methods
# --------------
def get_visuals(self):
"""Return all visuals defined in the scene."""
return self.scene.get('visuals', [])
def get_visual(self, name):
"""Return a visual by its name."""
visuals = [v for v in self.get_visuals() if v.get('name', '') == name]
if not visuals:
raise ValueError("The visual %s has not been found" % name)
return visuals[0]
# Data methods
# ------------
def set_data(self, name, **kwargs):
"""Load data for the specified visual. Uploading does not happen here
but in `update_all_variables` instead, since this needs to happen
after shader program binding in the paint method.
Arguments:
* visual: the name of the visual as a string, or a visual dict.
* **kwargs: the data to update as name:value pairs. name can be
any field of the visual, plus one of the following keywords:
* size: the size of the visual,
* primitive_type: the GL primitive type,
* constrain_ratio: whether to constrain the ratio of the visual,
* constrain_navigation: whether to constrain the navigation,
"""
# call set_data on the given visual renderer
if name in self.visual_renderers:
self.visual_renderers[name].set_data(**kwargs)
def copy_texture(self, name, tex1, tex2):
self.visual_renderers[name].copy_texture(tex1, tex2)
# Rendering methods
# -----------------
def initialize(self):
"""Initialize the renderer."""
# print the renderer information
for key, value in GLVersion.get_renderer_info().iteritems():
if key is not None and value is not None:
log_debug(key + ": " + value)
# initialize the renderer options using the options set in the Scene
self.set_renderer_options()
# create the VisualRenderer objects
self.visual_renderers = OrderedDict()
for visual in self.get_visuals():
name = visual['name']
self.visual_renderers[name] = GLVisualRenderer(self, visual)
# detect FBO
self.fbos = []
for name, vr in self.visual_renderers.iteritems():
fbos = vr.get_variables('framebuffer')
if fbos:
self.fbos.extend([fbo['buffer'] for fbo in fbos])
def clear(self):
"""Clear the scene."""
# clear the buffer (and depth buffer if 3D is activated)
if self.scene.get('renderer_options', {}).get('activate3D', None):
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
else:
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
def paint(self):
"""Paint the scene."""
# non-FBO rendering
if not self.fbos:
self.clear()
for name, visual_renderer in self.visual_renderers.iteritems():
visual_renderer.paint()
# render each FBO separately, then render non-FBO (screen) visuals
else:
for fbo in self.fbos:
FrameBuffer.bind(fbo)
# fbo index
ifbo = self.fbos.index(fbo)
# clear
self.clear()
# paint all visual renderers
for name, visual_renderer in self.visual_renderers.iteritems():
if visual_renderer.framebuffer == ifbo:
# print ifbo, visual_renderer
visual_renderer.paint()
# finally, paint screen
FrameBuffer.unbind()
# render screen (non-FBO) visuals
self.clear()
for name, visual_renderer in self.visual_renderers.iteritems():
if visual_renderer.framebuffer == 'screen':
# print "screen", visual_renderer
visual_renderer.paint()
# print
def resize(self, width, height):
"""Resize the canvas and make appropriate changes to the scene."""
# paint within the whole window
gl.glViewport(0, 0, width, height)
# compute the constrained viewport
x = y = 1.0
if self.get_renderer_option('constrain_ratio'):
if height > 0:
aw = float(width) / height
ar = self.get_renderer_option('constrain_ratio')
if ar is True:
ar = 1.
if ar < aw:
x, y = aw / ar, 1.
else:
x, y = 1., ar / aw
self.viewport = x, y
width = float(width)
height = float(height)
# update the viewport and window size for all visuals
for visual in self.get_visuals():
self.set_data(visual['name'],
viewport=self.viewport,
window_size=(width, height))
# Cleanup methods
# ---------------
def cleanup(self):
"""Clean up all allocated OpenGL objects."""
for name, renderer in self.visual_renderers.iteritems():
renderer.cleanup()
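# Illustrative usage sketch (not part of the original module): the typical
# GLRenderer lifecycle inside a GL widget. The `scene` dictionary is assumed
# to come from the rest of the library; the visual name and data keyword
# below are placeholders.
#
# renderer = GLRenderer(scene)
# renderer.initialize() # once the OpenGL context is ready
# renderer.resize(800, 600) # on every window resize
# renderer.set_data('my_visual', visible=True) # update a visual's data
# renderer.paint() # on every frame
# renderer.cleanup() # before the context is destroyed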
| bsd-3-clause | -5,473,990,361,901,274,000 | 36.791277 | 102 | 0.550144 | false |
polyaxon/polyaxon | core/polyaxon/schemas/fields/__init__.py | 1 | 1125 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.schemas.fields.dict_or_str import DictOrStr # noqa
from polyaxon.schemas.fields.float_or_str import FloatOrStr # noqa
from polyaxon.schemas.fields.indexed_dict import IndexedDict # noqa
from polyaxon.schemas.fields.int_or_str import IntOrStr # noqa
from polyaxon.schemas.fields.obj_list_obj import ObjectOrListObject # noqa
from polyaxon.schemas.fields.str_or_fct import StrOrFct # noqa
from polyaxon.schemas.fields.tensor import Tensor # noqa
from polyaxon.schemas.fields.uuids import UUID # noqa
| apache-2.0 | -1,068,725,016,627,593,100 | 45.875 | 75 | 0.779556 | false |
DolphinDream/sverchok | nodes/logic/input_switch_mod.py | 1 | 7641 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from bpy.props import IntProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
GREEK_LABELS = [
"Alpha", "Beta", "Gamma", "Delta", "Epsilon", "Zeta", "Eta", "Theta",
"Iota", "Kappa", "Lambda", "Mu", "Nu", "Xi", "Omicron", "Pi", "Rho",
"Sigma", "Tau", "Upsilon", "Phi", "Chi", "Psi", "Omega"]
GENERIC_SOCKET = "SvStringsSocket"
SEPARATOR_SOCKET = "SvSeparatorSocket"
MAX_SET_SIZE = 9
MAX_NUM_SWITCHES = 9
def get_indices_that_should_be_visible(node):
""" instead this could be composed of multiple calls to 'get_indices_from_groupnum' function """
socket_index = 1
vis_dict = {}
vis_dict[0] = True # selector always visible, first socket
for group_index in range(node.max_groups):
vis_dict[socket_index] = group_index < node.num_visible_groups
socket_index += 1
for set_item in range(node.max_items_per_group):
vis_dict[socket_index] = (group_index < node.num_visible_groups and set_item < node.num_items_per_group)
socket_index += 1
# g = "".join(["01"[k] for k in node.values()])
# print(g)
return vis_dict
def get_indices_for_groupnum(node, group_lookup):
idx = 2 + ((node.max_items_per_group * group_lookup) + group_lookup)
return list(range(idx, idx + node.num_items_per_group))
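# Worked example (illustrative, using the defaults above): with
# MAX_SET_SIZE = 9, socket 0 is "Selected", socket 1 is the first separator,
# and each group occupies 9 item slots plus one separator, so
# idx = 2 + (9 * group + group) = 2 + 10 * group. With num_items_per_group = 2
# this yields input sockets [2, 3] for group 0 and [12, 13] for group 1.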
class SvInputSwitchNodeMOD(bpy.types.Node, SverchCustomTreeNode):
"""
Triggers: Sets, Switch, Select
Tooltip: Switch among multiple input sets
auto expanding feature:
determined by "any_sockets_of_last_input_set_connected", until the number of visible groups reaches max groups
if the node looks like:
alpha 1
alpha 2
beta 1 <- if any of these gets a linked socket, the next socket set is automatically generated
beta 2 <-
gamma 1
gamma 2
debug tools:
# (import inspect)
# stick this line of code at the top of a function, and it will print the name of the function when called
# print('doing', inspect.stack()[0][3])
"""
bl_idname = 'SvInputSwitchNodeMOD'
bl_label = 'Input Switch MOD'
sv_icon = 'SV_INPUT_SWITCH'
@property
def interface_fully_initialized(self):
return len(self.outputs) == MAX_SET_SIZE
@property
def not_already_maxed_out(self):
return self.num_switches < MAX_NUM_SWITCHES
@property
def any_sockets_of_last_input_set_connected(self):
indices = get_indices_for_groupnum(self.node_state, self.num_switches-1)
return any([self.inputs[idx].is_linked for idx in indices])
@property
def node_state(self):
state = lambda: None
state.max_groups = MAX_NUM_SWITCHES
state.num_visible_groups = self.num_switches
state.max_items_per_group = MAX_SET_SIZE
state.num_items_per_group = self.num_sockets_per_set
return state
def configure_sockets_for_switchnum(self, context):
""" called when user adjust num sockets per set slider """
self.set_hidestate_output_sockets_to_cope_with_switchnum()
self.set_hidestate_input_sockets_to_cope_with_switchnum()
num_sockets_per_set: IntProperty(
name="Num Sockets per set", description="Num sockets per set",
default=2, min=1, max=MAX_SET_SIZE, update=configure_sockets_for_switchnum)
num_switches: IntProperty(
name="Num Switches", description="Number of switch items (no update associated)",
default=2, min=2, max=MAX_NUM_SWITCHES)
num_available_switches: IntProperty(
default=2, min=2, description='keep track of current state (no update associated)')
selected: IntProperty(
name="Selected", description="Selected Set",
default=0, min=0, update=updateNode)
def initialize_input_sockets(self):
inew = self.inputs.new
inew(GENERIC_SOCKET, "Selected").prop_name = "selected"
for group in range(MAX_NUM_SWITCHES):
socket = inew(SEPARATOR_SOCKET, f"Separator {group}")
for set_item in range(MAX_SET_SIZE):
inew(GENERIC_SOCKET, f"{GREEK_LABELS[group]} {set_item}")
def initialize_output_sockets(self):
for i in range(MAX_SET_SIZE): self.outputs.new(GENERIC_SOCKET, f"Data {i}")
def replace_socket_if_needed(self, input_socket):
if input_socket.bl_idname != input_socket.other.bl_idname:
input_socket.replace_socket(input_socket.other.bl_idname)
def adjust_input_socket_bl_idname_to_match_linked_input(self):
for input_socket in self.inputs:
if input_socket.is_linked:
self.replace_socket_if_needed(input_socket)
def adjust_output_sockets_bl_idname_to_match_selected_set(self, remap_indices):
for out_idx, in_idx in enumerate(remap_indices):
input_bl_idname = self.inputs[in_idx].bl_idname
if input_bl_idname != self.outputs[out_idx].bl_idname:
self.outputs[out_idx].replace_socket(input_bl_idname)
def set_hidestate_input_sockets_to_cope_with_switchnum(self):
tndict = get_indices_that_should_be_visible(self.node_state)
for key, value in tndict.items():
socket = self.inputs[key]
desired_hide_state = not(value)
if not socket.hide == desired_hide_state:
socket.hide_safe = desired_hide_state
def set_hidestate_output_sockets_to_cope_with_switchnum(self):
for i in range(MAX_SET_SIZE):
socket = self.outputs[i]
desired_state = (i > (self.num_sockets_per_set - 1))
if socket.hide != desired_state:
socket.hide_safe = desired_state
def interface_unhide_inputs_to_handle_new_set_if_needed(self):
if self.not_already_maxed_out and self.any_sockets_of_last_input_set_connected:
self.num_switches += 1
self.set_hidestate_input_sockets_to_cope_with_switchnum()
def draw_buttons(self, context, layout):
layout.prop(self, "num_sockets_per_set")
def sv_update(self):
if not self.interface_fully_initialized:
return
self.interface_unhide_inputs_to_handle_new_set_if_needed()
def sv_init(self, context):
self.initialize_input_sockets()
self.set_hidestate_input_sockets_to_cope_with_switchnum()
self.initialize_output_sockets()
self.set_hidestate_output_sockets_to_cope_with_switchnum()
def process(self):
active_index = self.inputs["Selected"].sv_get()[0][0]
remap_indices = get_indices_for_groupnum(self.node_state, active_index)
self.adjust_input_socket_bl_idname_to_match_linked_input()
self.adjust_output_sockets_bl_idname_to_match_selected_set(remap_indices)
for output_idx, input_idx in enumerate(remap_indices):
input_socket = self.inputs[input_idx]
if input_socket.is_linked:
A = input_socket.sv_get()
else:
A = [None]
self.outputs[output_idx].sv_set(A)
def get_local_function(self, named_function):
if named_function in globals():
return globals()[named_function]
classes = [SvInputSwitchNodeMOD]
register, unregister = bpy.utils.register_classes_factory(classes)
| gpl-3.0 | -1,290,757,482,226,110,700 | 37.984694 | 117 | 0.647821 | false |
udbhav/kishore | kishore/urls/music.py | 1 | 1392 | from django.conf.urls import *
from kishore.views import (ArtistDetail, ArtistList, SongDetail, SongList, ReleaseDetail,
ReleaseList, ArtistSongList, ArtistsByTag, SongsByTag, ReleasesByTag,
DownloadSong)
urlpatterns = patterns(
'',
url(r'^artists/$', ArtistList.as_view(), name='kishore_artists_index'),
url(r'^artists/tagged/(?P<tag>[-\w]+)/$', ArtistsByTag.as_view(), name='kishore_artists_by_tag'),
url(r'^artists/(?P<slug>[-\w]+)/$', ArtistDetail.as_view(), name='kishore_artist_detail'),
url(r'^artists/(?P<slug>[-\w]+)/songs/$', ArtistSongList.as_view(), name='kishore_artist_songs'),
url(r'^songs/$', SongList.as_view(), name='kishore_songs_index'),
url(r'^songs/tagged/(?P<tag>[-\w]+)/$', SongsByTag.as_view(), name='kishore_songs_by_tag'),
url(r'^songs/play/$', 'kishore.views.play_song', {}, 'kishore_song_play'),
url(r'^songs/download/(?P<pk>\d+)/$', DownloadSong.as_view(), name='kishore_song_download'),
url(r'^songs/(?P<slug>[-\w]+)/$', SongDetail.as_view(), name='kishore_song_detail'),
url(r'^releases/$', ReleaseList.as_view(), name='kishore_releases_index'),
url(r'^releases/tagged/(?P<tag>[-\w]+)/$', ReleasesByTag.as_view(),
name='kishore_releases_by_tag'),
url(r'^releases/(?P<slug>[-\w]+)/$', ReleaseDetail.as_view(), name='kishore_release_detail'),
)
| mit | -3,174,408,630,466,457,000 | 59.521739 | 101 | 0.623563 | false |
naibaf7/PyGreentea | examples/3D_337znni/mknet.py | 1 | 1566 | from __future__ import print_function
import sys, os, math
import numpy as np
from numpy import float32, int32, uint8, dtype
# Load PyGreentea
# Relative path to where PyGreentea resides
pygt_path = '../..'
sys.path.append(pygt_path)
import pygreentea.pygreentea as pygt
import caffe
from caffe import layers as L
from caffe import params as P
from caffe import to_proto
from pygreentea.pygreentea import metalayers as ML
net = caffe.NetSpec()
net.data = L.MemoryData(dim=[1, 1], ntop=1)
net.label = L.MemoryData(dim=[1, 1], ntop=1, include=[dict(phase=0)])
net.sknet = ML.SKNet(net.data,
ip_depth=0,
dropout=0,
fmap_inc_rule = lambda fmaps: 80,
fmap_dec_rule = lambda fmaps: 80,
fmap_bridge_rule = lambda fmaps: 3,
fmap_start=80,
conv=[[3,3,3],[3,3,3],[3,3,3],[3,3,3],[3,3,3],[3,3,3]],
pool=[[2,2,2],[2,2,2],[2,2,2],[1,1,1]],
padding=[85,85,85])
net.prob = L.Softmax(net.sknet, ntop=1, in_place=False, include=[dict(phase=1)])
net.loss = L.SoftmaxWithLoss(net.sknet, net.label, ntop=0, loss_weight=1.0, include=[dict(phase=0)])
pygt.fix_input_dims(net,
[net.data, net.label],
max_shapes = [[130,130,130],[130,130,130]],
shape_coupled = [-1, 0, 0])
protonet = net.to_proto()
protonet.name = 'net';
# Store the network as prototxt
with open(protonet.name + '.prototxt', 'w') as f:
print(protonet, file=f)
| bsd-2-clause | 370,031,498,582,636,740 | 32.319149 | 100 | 0.58046 | false |
merrikat/volunteerAnalysisPage | combine-csv.py | 1 | 3302 | """
Combine .csv files into a single .csv file with all fields
2013-Nov-25 by Robert Woodhead, [email protected]
2016-Apr by merrikat
Usage:
python combine-csv.py {csv folder} {output file} [{optional count field name}]
Where:
{csv folder} is a folder containing .csv files.
{output file} is the destination file.
{optional count field name} if present, is added to the list of fields; only unique lines
are output, with a count of how many occurrences.
IMPORTANT: If NOT PRESENT, then an additional field with the source file name of the line is appended.
If you are outputting field counts, the source file name line is not emitted because the same line
could be present in multiple files.
Output:
Reads all the .csv files in the csv folder. Compiles a list of all the
header fields that are present. Rereads the files, and outputs a single
.csv containing all the records, shuffling fields to match the global
header fields. Combines contents of duplicate fields using a | delimiter.
Adds either a file name field or a count field (with definable name) to each line.
"""
import sys
import os
import glob
import csv
import copy
#
# Globals
#
error_count = 0 # number of errors encountered during processing
#
# Error reporting
#
def add_error(desc):
global error_count
error_count += 1
sys.stderr.write(desc + '\n')
def optional(caption,value):
if value == '':
return ''
else:
return caption + value + '\n'
def cleanup(str):
while str.rfind(' ') != -1:
str = str.replace(' ',' ')
return str
#
# process command line arguments and execute
#
if __name__ == '__main__':
if not (3 <= len(sys.argv) <= 4):
print 'usage: python combine-csv.py {csv folder} {output file} [{optional count field name}]'
sys.exit(1)
hdrList = []
hdrLen = []
doCount = (len(sys.argv) == 4)
counts = {}
# get the headers
for filename in glob.iglob(os.path.join(sys.argv[1],'*.csv')):
with open(filename,'rb') as f:
csvIn = csv.reader(f)
hdr = csvIn.next()
hdr[0] = hdr[0].replace('\xef\xbb\xbf','')
hdrList.append((len(hdr),hdr))
# construct the list of unique headers
hdrList.sort(reverse=True)
hdrs = []
template = []
for t in hdrList:
for f in t[1]:
if not (f in hdrs):
hdrs.append(f)
template.append('')
if doCount:
hdrs.append(sys.argv[3])
else:
hdrs.append('Source File')
# output the combined file
with open(sys.argv[2],'wb') as of:
csvOut = csv.writer(of)
csvOut.writerow(hdrs)
for filename in glob.iglob(os.path.join(sys.argv[1],'*.csv')):
with open(filename,'rb') as f:
csvIn = csv.reader(f)
hdr = csvIn.next()
hdr[0] = hdr[0].replace('\xef\xbb\xbf','')
for row in csvIn:
newRow = list(template)
for i,v in enumerate(row):
j = hdrs.index(hdr[i])
if newRow[j] == '':
newRow[j] = v
else:
newRow[j] = newRow[j] + '|' + v
if doCount:
newRow = tuple(newRow)
if newRow in counts:
counts[newRow] += 1
else:
counts[newRow] = 1
else:
newRow.append(os.path.splitext(os.path.basename(filename))[0].title())
csvOut.writerow(newRow)
# if doing counts, output each unique row followed by its count
print counts
for k,v in counts.iteritems():
k = list(k)
k.append(v)
csvOut.writerow(k)
| mit | -5,366,394,753,895,992,000 | 21.161074 | 104 | 0.654755 | false |
r24mille/env_canada_weather_history | import_history.py | 1 | 20203 | import argparse
import csv
from datetime import datetime, timezone
from urllib import request
from xml.etree import ElementTree
import mysql.connector
import pytz
from config import mysql_config
from models import Observation, Station
def canadian_timezones():
"""
Valid set of Canadian timezone strings according to the IANA standard.
See https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
Return
List of valid, Canadian timezone strings.
"""
timezones = ['America/St_Johns', 'America/Halifax', 'America/Glace_Bay',
'America/Moncton', 'America/Goose_Bay',
'America/Blanc-Sablon', 'America/Montreal',
'America/Toronto', 'America/Nipigon', 'America/Thunder_Bay',
'America/Iqaluit', 'America/Pangnirtung', 'America/Resolute',
'America/Atikokan', 'America/Rankin_Inlet',
'America/Winnipeg', 'America/Rainy_River', 'America/Regina',
'America/Swift_Current', 'America/Edmonton',
'America/Cambridge_Bay', 'America/Yellowknife',
'America/Inuvik', 'America/Creston', 'America/Dawson_Creek',
'America/Vancouver', 'America/Whitehorse', 'America/Dawson']
return timezones
def csv_write_station(station, filename='station.csv'):
"""Write model.Station information to CSV file."""
with open(filename, 'w', newline='') as csvfile:
csvw = csv.writer(csvfile)
csvw.writerow(['station_id', 'name', 'province', 'longitude',
'latitude', 'elevation', 'climate_identifier',
'local_tz_str'])
csvw.writerow([station.station_id, station.name, station.province,
station.longitude, station.latitude, station.elevation,
station.climate_identifier, station.local_tz_str])
def csv_write_observations(observations, filename='observations.csv'):
"""Write model.Observation list to CSV file."""
with open(filename, 'w', newline='') as csvfile:
csvw = csv.writer(csvfile)
csvw.writerow(['station_id', 'temp_c', 'dewpoint_temp_c',
'rel_humidity_pct', 'wind_dir_deg', 'wind_speed_kph',
'visibility_km', 'station_pressure_kpa', 'humidex',
'wind_chill', 'weather_desc', 'obs_datetime_std',
'obs_datetime_dst', 'obs_quality'])
for obs in observations:
csvw.writerow([obs.station_id, obs.temp_c, obs.dewpoint_temp_c,
obs.rel_humidity_pct, obs.wind_dir_deg,
obs.wind_speed_kph, obs.visibility_km,
obs.station_pressure_kpa, obs.humidex,
obs.wind_chill, obs.weather_desc,
obs.obs_datetime_std, obs.obs_datetime_dst,
obs.obs_quality])
def fetch_content(station_id, year_num, month_num, day_num_start,
timeframe=1, frmt='xml'):
"""
Fetch weather history data from Environment Canada.
TODO(r24mille): Allow a user to switch between XML/CSV data for parsing.
TODO(r24mille): Allow user to change the timeframe parameter for different
data resolution (ie. daily, hourly, etc.)
Keyword arguments:
station_id -- Integer corresponding to an Environment Canada station ID
(ie. location of weather reading).
year_num -- Integer indicating the year of the requested data.
month_num -- Integer indicating the month of the requested data.
day_num_start -- Integer indicating the starting day of the forecast,
though multiple days of forecasted data may be returned.
timeframe -- Controls the time span of data that is returned
(default 1=month of hourly observations).
frmt -- Controls the format that Environment Canada data should be
returned in (default 'xml').
Return:
The request.urlopen response.
"""
data_url = ('http://climate.weather.gc.ca/climateData/bulkdata_e.html' +
'?format=' + frmt +
'&stationID=' + str(station_id) +
'&Year=' + str(year_num) +
'&Month=' + str(month_num) +
'&Day=' + str(day_num_start) +
'&timeframe=' + str(timeframe))
print('URL: ' + data_url)
url_response = request.urlopen(data_url)
return url_response
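# Example (illustrative sketch): fetch one month of hourly XML observations.
# The station ID below is a placeholder, not a verified Environment Canada
# identifier.
#
# response = fetch_content(station_id=32008, year_num=2013, month_num=7,
# day_num_start=1, timeframe=1, frmt='xml')
# xml_string = response.read().decode('utf-8')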
def sql_insert_observations(observations, config, batch_size=100):
"""
Inserts Observations (ie. stationdata) into a database.
TODO (r24mille): Decouple this method from the main file
TODO (r24mille): Support some other driver other than mysql.connector
Keyword arguments:
observations -- A list of models.Observation objects to be inserted into a
MySQL database
config -- A dict of MySQL configuration credentials (see config-example.py)
batch_size -- INSERT min(batch_size, len(observations)) rows at a time for
fairly fast INSERT times (default batch_size=100).
"""
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
# Batch size of query
insert_complete = False
batch_start_idx = 0
batch_size = min(batch_size, len(observations))
# Continue batched INSERTs until Observations list has been processed
while insert_complete == False:
batch_idx_upperbound = (batch_start_idx + batch_size)
ins_data = ()
ins_obs = ("INSERT INTO envcan_observation (stationID, " +
"obs_datetime_std, obs_datetime_dst, temp_c, " +
"dewpoint_temp_c, rel_humidity_pct, wind_dir_deg, " +
"wind_speed_kph, visibility_km, station_pressure_kpa, " +
"humidex, wind_chill, weather_desc, quality) VALUES ")
for i in range(batch_start_idx, batch_idx_upperbound):
obs = observations[i]
ins_obs += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
ins_data += (obs.station_id,
obs.obs_datetime_std.strftime('%Y-%m-%d %H:%M:%S'),
obs.obs_datetime_dst.strftime('%Y-%m-%d %H:%M:%S'),
obs.temp_c, obs.dewpoint_temp_c,
obs.rel_humidity_pct, obs.wind_dir_deg,
obs.wind_speed_kph, obs.visibility_km,
obs.station_pressure_kpa, obs.humidex, obs.wind_chill,
obs.weather_desc, obs.obs_quality)
# If i isn't the last item in batch, add a comma to the VALUES items
if i != (batch_idx_upperbound - 1):
ins_obs += ", "
cursor.execute(ins_obs, ins_data)
# If the upper bound is the last observation, mark INSERTs complete
if len(observations) - batch_idx_upperbound == 0:
insert_complete = True
else: # Slide batch window
batch_size = min(batch_size,
len(observations) - batch_idx_upperbound)
batch_start_idx = batch_idx_upperbound
# Make sure data is committed to the database
cnx.commit()
cursor.close()
cnx.close()
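# Example (illustrative): insert previously parsed observations in batches of
# 500 rows. `observations` is a list of models.Observation objects and
# mysql_config() returns the credentials dict from config.py, as in main().
#
# sql_insert_observations(observations, config=mysql_config(), batch_size=500)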
def sql_insert_station(station, config):
"""
Checks if a station matching the stationID exists. If no match exists,
then one is inserted.
TODO (r24mille): Decouple this method from the main file
TODO (r24mille): Support some other driver other than mysql.connector
Keyword arguments:
station -- A models.Station object to be inserted into a MySQL database
config -- A dict of MySQL configuration credentials (see config-example.py)
"""
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
# Query envcan_station for matching stationID
query_station = ("SELECT * FROM envcan_station " +
"WHERE stationID = %(station_id)s")
cursor.execute(query_station, {'station_id':station.station_id})
station_row = cursor.fetchone()
# If no station exists matching that stationID, insert one
if station_row == None:
insert_station = ("INSERT INTO envcan_station (stationID, name, " +
"province, latitude, longitude, elevation, " +
"climate_identifier, local_timezone) " +
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
insert_data = (station.station_id, station.name, station.province,
station.latitude, station.longitude, station.elevation,
station.climate_identifier, station.local_tz_str)
cursor.execute(insert_station, insert_data)
# Make sure data is committed to the database
cnx.commit()
cursor.close()
cnx.close()
def range_hourly(station_id, year_start, year_end, month_start, month_end,
day_start, local_tz_name):
"""
Calls Environment Canada endpoint and parses the returned XML into
StationData objects.
Keyword arguments:
station_id -- Integer corresponding to an Environment Canada station ID
(ie. location of weather reading).
year_start -- Integer indicating the year of the first weather history
request.
year_end -- Integer indicating the year of the last weather history
request (inclusive). In combination with month_start and
month_end, all weather history between start and end times
will be requested.
month_start -- Integer indicating the month of the first weather history
request.
month_end -- Integer indicating the month of the last weather history
request (inclusive). In combination with year_start and
year_end, all weather history between start and end times
will be requested.
day_start -- Integer indicating the starting day of the forecast,
though multiple days of forecasted data will be returned.
local_tz_name -- String representation of local timezone name
(eg. 'America/Toronto').
Return:
Two two-item vector [station, observations] where station is a
model.Station object and observations is a list of hourly
model.Observation objects.
"""
# Instantiate objects that are returned by this method
station = None
observations = list()
y = year_start
m = month_start
d = day_start
req_date = datetime(y, m, d)
end_date = datetime(year_end, month_end, day_start)
while req_date <= end_date:
xml_response = fetch_content(station_id=station_id, year_num=y,
month_num=m, day_num_start=d, timeframe=1,
frmt='xml')
xml_string = xml_response.read().decode('utf-8')
weather_root = ElementTree.fromstring(xml_string)
# Only populate Station once
if station == None:
station = Station()
station.station_id = station_id
station.local_tz_str = local_tz_name
station_local_tz = pytz.timezone(local_tz_name)
epoch = datetime.utcfromtimestamp(0)
offset_delta = station_local_tz.utcoffset(epoch)
station_std_tz = timezone(offset_delta)
for si_elmnt in weather_root.iter('stationinformation'):
name_txt = si_elmnt.find('name').text
if name_txt and name_txt != ' ':
station.name = name_txt
province_txt = si_elmnt.find('province').text
if province_txt and province_txt != ' ':
station.province = province_txt
latitude_txt = si_elmnt.find('latitude').text
if latitude_txt and latitude_txt != ' ':
station.latitude = float(latitude_txt)
longitude_txt = si_elmnt.find('longitude').text
if longitude_txt and longitude_txt != ' ':
station.longitude = float(longitude_txt)
elevation_txt = si_elmnt.find('elevation').text
if elevation_txt and elevation_txt != ' ':
station.elevation = float(elevation_txt)
climate_id_txt = si_elmnt.find('climate_identifier').text
if climate_id_txt and climate_id_txt != ' ':
station.climate_identifier = int(climate_id_txt)
# Iterate stationdata XML elements and append Observations to list
for sd_elmnt in weather_root.iter('stationdata'):
observation = Observation()
# Get portions of date_time for observation
year_txt = sd_elmnt.attrib['year']
month_txt = sd_elmnt.attrib['month']
day_txt = sd_elmnt.attrib['day']
hour_txt = sd_elmnt.attrib['hour']
minute_txt = sd_elmnt.attrib['minute']
if year_txt and month_txt and day_txt and hour_txt and minute_txt:
observation.obs_datetime_std = datetime(year=int(year_txt),
month=int(month_txt),
day=int(day_txt),
hour=int(hour_txt),
minute=int(minute_txt),
second=0,
microsecond=0,
tzinfo=station_std_tz)
observation.obs_datetime_dst = observation.obs_datetime_std.astimezone(station_local_tz)
if 'quality' in sd_elmnt.attrib:
quality_txt = sd_elmnt.attrib['quality']
else:
quality_txt = None
if quality_txt and quality_txt != ' ':
observation.obs_quality = quality_txt
# Set StationData fields based on child elements' values
observation.station_id = station_id
temp_txt = sd_elmnt.find('temp').text
if temp_txt and temp_txt != ' ':
observation.temp_c = float(temp_txt)
dptemp_txt = sd_elmnt.find('dptemp').text
if dptemp_txt and dptemp_txt != ' ':
observation.dewpoint_temp_c = float(dptemp_txt)
relhum_txt = sd_elmnt.find('relhum').text
if relhum_txt and relhum_txt != ' ':
observation.rel_humidity_pct = int(relhum_txt)
winddir_txt = sd_elmnt.find('winddir').text
if winddir_txt and winddir_txt != ' ':
observation.wind_dir_deg = int(winddir_txt) * 10
windspd_txt = sd_elmnt.find('windspd').text
if windspd_txt and windspd_txt != ' ':
observation.wind_speed_kph = int(windspd_txt)
visibility_txt = sd_elmnt.find('visibility').text
if visibility_txt and visibility_txt != ' ':
observation.visibility_km = float(visibility_txt)
stnpress_txt = sd_elmnt.find('stnpress').text
if stnpress_txt and stnpress_txt != ' ':
observation.station_pressure_kpa = float(stnpress_txt)
humidex_txt = sd_elmnt.find('humidex').text
if humidex_txt and humidex_txt != ' ':
observation.humidex = float(humidex_txt)
windchill_txt = sd_elmnt.find('windchill').text
if windchill_txt and windchill_txt != ' ':
observation.wind_chill = int(windchill_txt)
observation.weather_desc = sd_elmnt.find('weather').text
# Add Observation element to the list
observations.append(observation)
# Increment year and month to populate date range
if m < 12:
m += 1
else:
y += 1
m = 1
req_date = datetime(y, m, d)
# Return the Station and the list of parsed Observation objects
return [station, observations]
def main():
"""
Main method intended to by called via command-line
TODO(r24mille): Parse SQL config from file rather than Python method
TODO(r24mille): Add means to customize CSV filenames generated
"""
description = 'Environment Canda historical weather parser/import tool.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--station_id', required=True, type=int,
help='The stationID from climate.weather.gc.ca URL')
parser.add_argument('--year_start', required=True, type=int,
help='Year for start of historical weather range')
parser.add_argument('--year_end', required=True, type=int,
help='Year for end of historical weather range (inclusive)')
parser.add_argument('--month_start', required=True, type=int,
help='Month for start of historical weather range')
parser.add_argument('--month_end', required=True, type=int,
help='Month for end of historical weather range (inclusive)')
parser.add_argument('--tz_name', required=True, type=str,
choices=canadian_timezones(),
help='IANA timezone string of the weather station')
parser.add_argument('--day_start', default=1, type=int,
help='Starting day for the historical weather range')
parser.add_argument('--dest', default='csv', type=str,
choices=['csv', 'sql'],
help='Destination of the parsed weather information')
parser.add_argument('--batch_size', default=100, type=int,
help='If destination is SQL, control the INSERT batch size')
args = parser.parse_args()
print(args)
# Fetch range of hourly weather observations
[station, observations] = range_hourly(station_id=args.station_id,
year_start=args.year_start,
year_end=args.year_end,
month_start=args.month_start,
month_end=args.month_end,
day_start=args.day_start,
local_tz_name=args.tz_name)
# Write parsed information to appropriate destination
if args.dest == 'sql':
sql_insert_station(station=station, config=mysql_config())
sql_insert_observations(observations=observations,
config=mysql_config(),
batch_size=args.batch_size)
elif args.dest == 'csv':
csv_write_station(station=station,
filename=(str(args.station_id) +
'station.csv'))
csv_write_observations(observations=observations,
filename=(str(args.station_id) +
'observations.csv'))
if __name__ == "__main__":
main() | apache-2.0 | 5,419,807,624,132,069,000 | 45.879147 | 104 | 0.544375 | false |
tcew/OCCA | scripts/setupKernelOperators.py | 1 | 15182 | from os import environ as ENV
maxN = 50
nSpacing = 3
def vnlc(n, N):
ret = ''
if n < (N - 1):
ret = ', '
if n != (N - 1) and ((n + 1) % nSpacing) == 0:
ret += '\n '
return ret;
def nlc(n, N):
ret = ''
if n < (N - 1):
ret = ', '
if n != (N - 1) and ((n + 1) % nSpacing) == 0:
ret += '\n '
return ret;
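# Worked example (illustrative): nlc() returns the ", " separator between
# generated kernel arguments and, with nSpacing = 3, appends a newline plus
# padding after every third argument, so a join such as
# ''.join('arg%d%s' % (n, nlc(n, 5)) for n in range(5))
# produces a C++ argument list that wraps onto a new line after arg2.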
def runFunctionFromArguments(N):
return 'switch(argc){\n' + '\n'.join([runFunctionFromArgument(n + 1) for n in xrange(N)]) + '}'
def runFunctionFromArgument(N):
return ' case ' + str(N) + """:
f(occaKernelInfoArgs, occaInnerId0, occaInnerId1, occaInnerId2, """ + ', '.join(['args[{0}]'.format(n) for n in xrange(N)]) + """); break;"""
def runKernelFromArguments(N):
return 'switch(argc){\n' + '\n'.join([runKernelFromArgument(n + 1) for n in xrange(N)]) + '}'
def runKernelFromArgument(N):
return ' case ' + str(N) + """:
if(kHandle->nestedKernelCount == 0){
(*kHandle)(""" + ', '.join(['args[{0}]'.format(n) for n in xrange(N)]) + """);
}""" + (("""
else{
(*kHandle)(kHandle->nestedKernels, """ + ', '.join(['args[{0}]'.format(n) for n in xrange(N)]) + """);
}""") if (N < maxN) else '') + """
break;"""
def virtualOperatorDeclarations(N):
return '\n\n'.join([virtualOperatorDeclaration(n + 1) for n in xrange(N)])
def virtualOperatorDeclaration(N):
return ' virtual void operator () ({0}) = 0;'.format( ' '.join(['const kernelArg &arg' + str(n) + vnlc(n, N) for n in xrange(N)]) )
def operatorDeclarations(mode, N):
return '\n\n'.join([operatorDeclaration(mode, n + 1) for n in xrange(N)])
def operatorDeclaration(mode, N):
if mode == 'Base':
ret = ' void operator () ({0});'.format( ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) )
else:
ret = ' template <>\n'\
+ ' void kernel_t<{0}>::operator () ({1});'.format(mode, ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) )
return ret
def operatorDefinitions(mode, N):
return '\n\n'.join([operatorDefinition(mode, n + 1) for n in xrange(N)])
def operatorDefinition(mode, N):
if mode == 'Base':
return """ void kernel::operator() (""" + ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """){
""" + '\n '.join(['arg' + str(n) + '.setupForKernelCall(kHandle->metaInfo.argIsConst(' + str(n) + '));' for n in xrange(N)]) + """
if(kHandle->nestedKernelCount == 0){
(*kHandle)(""" + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """);
}
else{""" + (("""
(*kHandle)(kHandle->nestedKernels, """ + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """);""") \
if (N < maxN) else '') + """
}
}
void kernelDatabase::operator() (""" + ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """){/*
occa::device_v *launchDevice = NULL;
if(arg0.dHandle) launchDevice = const_cast<occa::device_v*>(arg0.dHandle);
""" + ' '.join([('else if(arg' + str(n + 1) + '.dHandle) launchDevice = const_cast<occa::device_v*>(arg' + str(n + 1) + '.dHandle);\n') for n in xrange(N - 1)]) + """
(*this)[launchDevice](""" + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """);*/
}"""
else:
header = operatorDefinitionHeader(mode, N)
return header + operatorModeDefinition[mode](N) + "\n }"
def operatorDefinitionHeader(mode, N):
return """ template <>
void kernel_t<{0}>::operator () ({1}){{""".format(mode, ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]))
def pthreadOperatorDefinition(N):
return """
PthreadsKernelData_t &data_ = *((PthreadsKernelData_t*) data);
kernelArg args[""" + str(N) + """] = {""" + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """};
pthreads::runFromArguments(data_, dims, inner, outer, """ + str(N) + """, args);"""
def serialOperatorDefinition(N):
return """
SerialKernelData_t &data_ = *((SerialKernelData_t*) data);
handleFunction_t tmpKernel = (handleFunction_t) data_.handle;
int occaKernelArgs[6];
occaKernelArgs[0] = outer.z;
occaKernelArgs[1] = outer.y;
occaKernelArgs[2] = outer.x;
occaKernelArgs[3] = inner.z;
occaKernelArgs[4] = inner.y;
occaKernelArgs[5] = inner.x;
int argc = 0;
const kernelArg *args[""" + str(N) + """] = {""" + ' '.join(['&arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """};
for(int i = 0; i < """ + str(N) + """; ++i){
for(int j = 0; j < args[i]->argc; ++j){
data_.vArgs[argc++] = args[i]->args[j].ptr();
}
}
int occaInnerId0 = 0, occaInnerId1 = 0, occaInnerId2 = 0;
cpu::runFunction(tmpKernel,
occaKernelArgs,
occaInnerId0, occaInnerId1, occaInnerId2,
argc, data_.vArgs);"""
def ompOperatorDefinition(N):
return """
OpenMPKernelData_t &data_ = *((OpenMPKernelData_t*) data);
handleFunction_t tmpKernel = (handleFunction_t) data_.handle;
int occaKernelArgs[6];
occaKernelArgs[0] = outer.z;
occaKernelArgs[1] = outer.y;
occaKernelArgs[2] = outer.x;
occaKernelArgs[3] = inner.z;
occaKernelArgs[4] = inner.y;
occaKernelArgs[5] = inner.x;
int argc = 0;
const kernelArg *args[""" + str(N) + """] = {""" + ' '.join(['&arg' + str(n) + nlc(n, N) for n in xrange(N)]) + """};
for(int i = 0; i < """ + str(N) + """; ++i){
for(int j = 0; j < args[i]->argc; ++j){
data_.vArgs[argc++] = args[i]->args[j].ptr();
}
}
int occaInnerId0 = 0, occaInnerId1 = 0, occaInnerId2 = 0;
cpu::runFunction(tmpKernel,
occaKernelArgs,
occaInnerId0, occaInnerId1, occaInnerId2,
argc, data_.vArgs);"""
def clOperatorDefinition(N):
return """
OpenCLKernelData_t &data_ = *((OpenCLKernelData_t*) data);
cl_kernel kernel_ = data_.kernel;
occa::dim fullOuter = outer*inner;
int argc = 0;
const kernelArg *args[""" + str(N) + """] = {""" + (', '.join((('\n ' + (' ' if (10 <= N) else ''))
if (n and ((n % 5) == 0))
else '')
+ "&arg{0}".format(n) for n in xrange(N))) + """};
OCCA_CL_CHECK("Kernel (" + metaInfo.name + ") : Setting Kernel Argument [0]",
clSetKernelArg(kernel_, argc++, sizeof(void*), NULL));
for(int i = 0; i < """ + str(N) + """; ++i){
for(int j = 0; j < args[i]->argc; ++j){
OCCA_CL_CHECK("Kernel (" + metaInfo.name + ") : Setting Kernel Argument [" << (i + 1) << "]",
clSetKernelArg(kernel_, argc++, args[i]->args[j].size, args[i]->args[j].ptr()));
}
}
OCCA_CL_CHECK("Kernel (" + metaInfo.name + ") : Kernel Run",
clEnqueueNDRangeKernel(*((cl_command_queue*) dHandle->currentStream),
kernel_,
(cl_int) dims,
NULL,
(uintptr_t*) &fullOuter,
(uintptr_t*) &inner,
0, NULL, NULL));"""
def cudaOperatorDefinition(N):
return """
CUDAKernelData_t &data_ = *((CUDAKernelData_t*) data);
CUfunction function_ = data_.function;
int occaKernelInfoArgs = 0;
int argc = 0;
const kernelArg *args[""" + str(N) + """] = {""" + (', '.join((('\n ' + (' ' if (10 <= N) else ''))
if (n and ((n % 5) == 0))
else '')
+ "&arg{0}".format(n) for n in xrange(N))) + """};
data_.vArgs[argc++] = &occaKernelInfoArgs;
for(int i = 0; i < """ + str(N) + """; ++i){
for(int j = 0; j < args[i]->argc; ++j){
data_.vArgs[argc++] = args[i]->args[j].ptr();
}
}
OCCA_CUDA_CHECK("Launching Kernel",
cuLaunchKernel(function_,
outer.x, outer.y, outer.z,
inner.x, inner.y, inner.z,
0, *((CUstream*) dHandle->currentStream),
data_.vArgs, 0));"""
def coiOperatorDefinition(N):
return """
COIKernelData_t &data_ = *((COIKernelData_t*) data);
COIDeviceData_t &dData = *((COIDeviceData_t*) ((device_t<COI>*) dHandle)->data);
int occaKernelArgs[6];
occaKernelArgs[0] = outer.z;
occaKernelArgs[1] = outer.y;
occaKernelArgs[2] = outer.x;
occaKernelArgs[3] = inner.z;
occaKernelArgs[4] = inner.y;
occaKernelArgs[5] = inner.x;
uintptr_t kSize = sizeof(data_.kernel);
::memcpy(&(data_.hostArgv[0]) , &(data_.kernel) , kSize);
::memcpy(&(data_.hostArgv[kSize]), &(occaKernelArgs[0]), 6*sizeof(int));
int hostPos = kSize + 6*sizeof(int) + """ + str(N) + """*sizeof(int);
int typePos = 0;
int devicePos = 0;
int *typePtr = (int*) (data_.hostArgv + kSize + 6*sizeof(int));
""" + '\n '.join(["""if(arg{0}.pointer){{
typePtr[typePos++] = (int) ((1 << 31) + devicePos);
data_.deviceArgv[devicePos] = *((coiMemory*) arg{0}.data());
data_.deviceFlags[devicePos] = COI_SINK_WRITE;
++devicePos;
}}
else{{
typePtr[typePos++] = (int) ((0 << 31) + hostPos);
::memcpy(&(data_.hostArgv[hostPos]), &(arg{0}.arg), arg{0}.size);
hostPos += arg{0}.size;
}}""".format(n) for n in xrange(N)]) + """
coiStream &stream = *((coiStream*) dHandle->currentStream);
COIPipelineRunFunction(stream.handle,
dData.kernelWrapper[""" + str(N - 1) + """],
devicePos,
(const coiMemory*) data_.deviceArgv,
(const coiMemoryFlags*) data_.deviceFlags,
false,
__null,
data_.hostArgv,
hostPos,
__null,
0,
&(stream.lastEvent));"""
def cOperatorDeclarations(N):
return '\n\n'.join([cOperatorDeclaration(n + 1) for n in xrange(N)])
def cOperatorDeclaration(N):
return ' OCCA_LFUNC void OCCA_RFUNC occaKernelRun{0}(occaKernel kernel, {1});\n'.format(N, ' '.join(['void *arg' + str(n) + nlc(n, N) for n in xrange(N)]) )
def cOperatorDefinitions(N):
return '\n\n'.join([cOperatorDefinition(n + 1) for n in xrange(N)])
def cOperatorDefinition(N):
argsContent = ', '.join('(occaType_t*) arg' + str(n) for n in xrange(N))
return (' void OCCA_RFUNC occaKernelRun{0}(occaKernel kernel, {1}){{\n'.format(N, ' '.join(['void *arg' + str(n) + nlc(n, N) for n in xrange(N)]) ) + \
' occa::kernel kernel_((occa::kernel_v*) kernel);\n' + \
' kernel_.clearArgumentList();\n' + \
' \n' + \
' occaType_t *args[' + str(N) + '] = {' + argsContent + '};\n' + \
' \n' + \
' for(int i = 0; i < ' + str(N) + '; ++i){\n' + \
' occaType_t &arg = *(args[i]);\n' + \
' void *argPtr = arg.value.data.void_;\n' + \
' \n' + \
' if(arg.type == OCCA_TYPE_MEMORY){\n' + \
' occa::memory memory_((occa::memory_v*) argPtr);\n' + \
' kernel_.addArgument(i, occa::kernelArg(memory_));\n' + \
' }\n' + \
' else if(arg.type == OCCA_TYPE_PTR){\n' + \
' occa::memory memory_((void*) argPtr);\n' + \
' kernel_.addArgument(i, occa::kernelArg(memory_));\n' + \
' }\n' + \
' else {\n' + \
' kernel_.addArgument(i, occa::kernelArg(arg.value));\n' + \
' delete (occaType_t*) args[i];\n' + \
' }\n' + \
' }\n' + \
' \n' + \
' kernel_.runFromArguments();\n' + \
' }\n');
operatorModeDefinition = { 'Serial' : serialOperatorDefinition,
'OpenMP' : ompOperatorDefinition,
'OpenCL' : clOperatorDefinition,
'CUDA' : cudaOperatorDefinition,
'Pthreads' : pthreadOperatorDefinition,
'COI' : coiOperatorDefinition }
occaDir = ENV['OCCA_DIR']
hpp = open(occaDir + '/include/occa/operators/virtualDeclarations.hpp', 'w')
hpp.write(virtualOperatorDeclarations(maxN));
hpp.write('\n'); # Make sure there is a newline at the end of the file
hpp.close()
hpp = open(occaDir + '/include/occa/operators/declarations.hpp', 'w')
hpp.write(operatorDeclarations('Base', maxN));
hpp.write('\n');
hpp.close()
hpp = open(occaDir + '/src/operators/definitions.cpp', 'w')
hpp.write(operatorDefinitions('Base', maxN));
hpp.write('\n');
hpp.close()
hpp = open(occaDir + '/src/operators/runFunctionFromArguments.cpp', 'w')
hpp.write(runFunctionFromArguments(maxN));
hpp.write('\n');
hpp.close()
hpp = open(occaDir + '/src/operators/runKernelFromArguments.cpp', 'w')
hpp.write(runKernelFromArguments(maxN));
hpp.write('\n');
hpp.close()
for mode in operatorModeDefinition:
hpp = open(occaDir + '/include/occa/operators/' + mode + 'KernelOperators.hpp', 'w')
hpp.write(operatorDeclarations(mode, maxN));
hpp.write('\n');
hpp.close()
cpp = open(occaDir + '/src/operators/' + mode + 'KernelOperators.cpp', 'w')
cpp.write(operatorDefinitions(mode, maxN));
cpp.write('\n');
cpp.close()
hpp = open(occaDir + '/include/occa/operators/cKernelOperators.hpp', 'w')
hpp.write(cOperatorDeclarations(maxN));
hpp.write('\n');
hpp.close()
cpp = open(occaDir + '/src/operators/cKernelOperators.cpp', 'w')
cpp.write(cOperatorDefinitions(maxN));
cpp.write('\n');
cpp.close()
| mit | 6,060,624,692,903,006,000 | 40.143631 | 173 | 0.471479 | false |
yugangzhang/chxanalys | chxanalys/chx_correlation.py | 1 | 42284 | # ######################################################################
# Developed at the NSLS-II, Brookhaven National Laboratory #
# Developed by Sameera K. Abeykoon, February 2014 #
# #
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for functions specific to time correlation
"""
from __future__ import absolute_import, division, print_function
from skbeam.core.utils import multi_tau_lags
from skbeam.core.roi import extract_label_indices
from collections import namedtuple
import numpy as np
from scipy.signal import fftconvolve
# for a convenient status bar
try:
from tqdm import tqdm
except ImportError:
def tqdm(iterator):
return iterator
import logging
logger = logging.getLogger(__name__)
def _one_time_process(buf, G, past_intensity_norm, future_intensity_norm,
label_array, num_bufs, num_pixels, img_per_level,
level, buf_no, norm, lev_len):
"""Reference implementation of the inner loop of multi-tau one time
correlation
This helper function calculates G, past_intensity_norm and
future_intensity_norm at each level; symmetric normalization is used.
.. warning :: This modifies inputs in place.
Parameters
----------
buf : array
image data array to use for correlation
G : array
matrix of auto-correlation function without normalizations
past_intensity_norm : array
matrix of past intensity normalizations
future_intensity_norm : array
matrix of future intensity normalizations
label_array : array
labeled array where all nonzero values are ROIs
num_bufs : int, even
number of buffers(channels)
num_pixels : array
number of pixels in each ROI; dimensions are [number of ROIs] x 1
img_per_level : array
to track how many images processed in each level
level : int
the current multi-tau level
buf_no : int
the current buffer number
norm : dict
to track bad images
lev_len : array
length of each level
Notes
-----
.. math::
G = <I(\tau)I(\tau + delay)>
.. math::
past_intensity_norm = <I(\tau)>
.. math::
future_intensity_norm = <I(\tau + delay)>
"""
img_per_level[level] += 1
# in multi-tau correlation, the subsequent levels have half as many
# buffers as the first
i_min = num_bufs // 2 if level else 0
for i in range(i_min, min(img_per_level[level], num_bufs)):
# compute the index into the autocorrelation matrix
t_index = level * num_bufs // 2 + i
delay_no = (buf_no - i) % num_bufs
# get the images for correlating
past_img = buf[level, delay_no]
future_img = buf[level, buf_no]
# find the normalization that can work both for bad_images
# and good_images
ind = int(t_index - lev_len[:level].sum())
normalize = img_per_level[level] - i - norm[level+1][ind]
# take out the past_ing and future_img created using bad images
# (bad images are converted to np.nan array)
if np.isnan(past_img).any() or np.isnan(future_img).any():
norm[level + 1][ind] += 1
else:
for w, arr in zip([past_img*future_img, past_img, future_img],
[G, past_intensity_norm, future_intensity_norm]):
binned = np.bincount(label_array, weights=w)[1:]
arr[t_index] += ((binned / num_pixels -
arr[t_index]) / normalize)
return None # modifies arguments in place!
results = namedtuple(
'correlation_results',
['g2', 'lag_steps', 'internal_state']
)
_internal_state = namedtuple(
'correlation_state',
['buf',
'G',
'past_intensity',
'future_intensity',
'img_per_level',
'label_array',
'track_level',
'cur',
'pixel_list',
'num_pixels',
'lag_steps',
'norm',
'lev_len']
)
_two_time_internal_state = namedtuple(
'two_time_correlation_state',
['buf',
'img_per_level',
'label_array',
'track_level',
'cur',
'pixel_list',
'num_pixels',
'lag_steps',
'g2',
'count_level',
'current_img_time',
'time_ind',
'norm',
'lev_len']
)
def _init_state_one_time(num_levels, num_bufs, labels):
"""Initialize a stateful namedtuple for the generator-based multi-tau
for one time correlation
Parameters
----------
num_levels : int
num_bufs : int
labels : array
Two dimensional labeled array that contains ROI information
Returns
-------
internal_state : namedtuple
The namedtuple that contains all the state information that
`lazy_one_time` requires so that it can be used to pick up
processing after it was interrupted
"""
(label_array, pixel_list, num_rois, num_pixels, lag_steps, buf,
img_per_level, track_level, cur, norm,
lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels)
# G holds the un normalized auto- correlation result. We
# accumulate computations into G as the algorithm proceeds.
G = np.zeros(((num_levels + 1) * num_bufs // 2, num_rois),
dtype=np.float64)
# matrix for normalizing G into g2
past_intensity = np.zeros_like(G)
# matrix for normalizing G into g2
future_intensity = np.zeros_like(G)
return _internal_state(
buf,
G,
past_intensity,
future_intensity,
img_per_level,
label_array,
track_level,
cur,
pixel_list,
num_pixels,
lag_steps,
norm,
lev_len,
)
def lazy_one_time(image_iterable, num_levels, num_bufs, labels,
internal_state=None):
"""Generator implementation of 1-time multi-tau correlation
If you do not want multi-tau correlation, set num_levels to 1 and
num_bufs to the number of images you wish to correlate
Parameters
----------
image_iterable : iterable of 2D arrays
num_levels : int
how many generations of downsampling to perform, i.e., the depth of
the binomial tree of averaged frames
num_bufs : int, must be even
maximum lag step to compute in each generation of downsampling
labels : array
Labeled array of the same shape as the image stack.
Each ROI is represented by sequential integers starting at one. For
example, if you have four ROIs, they must be labeled 1, 2, 3,
4. Background is labeled as 0
internal_state : namedtuple, optional
internal_state is a bucket for all of the internal state of the
generator. It is part of the `results` object that is yielded from
this generator
Yields
------
namedtuple
A `results` object is yielded after every image has been processed.
This `results` object contains, in this order:
- `g2`: the normalized correlation
shape is (len(lag_steps), num_rois)
- `lag_steps`: the times at which the correlation was computed
- `_internal_state`: all of the internal state. Can be passed back in
to `lazy_one_time` as the `internal_state` parameter
Notes
-----
The normalized intensity-intensity time-autocorrelation function
is defined as
.. math::
g_2(q, t') = \\frac{<I(q, t)I(q, t + t')> }{<I(q, t)>^2}
t' > 0
Here, ``I(q, t)`` refers to the scattering strength at the momentum
transfer vector ``q`` in reciprocal space at time ``t``, and the brackets
``<...>`` refer to averages over time ``t``. The quantity ``t'`` denotes
the delay time
This implementation is based on published work. [1]_
References
----------
.. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
"Area detector based photon correlation in the regime of
short data batches: Data reduction for dynamic x-ray
scattering," Rev. Sci. Instrum., vol 71, p 3274-3289, 2000.
"""
if internal_state is None:
internal_state = _init_state_one_time(num_levels, num_bufs, labels)
# create a shorthand reference to the results and state named tuple
s = internal_state
# iterate over the images to compute multi-tau correlation
for image in image_iterable:
# Compute the correlations for all higher levels.
level = 0
# increment buffer
s.cur[0] = (1 + s.cur[0]) % num_bufs
# Put the ROI pixels into the ring buffer.
s.buf[0, s.cur[0] - 1] = np.ravel(image)[s.pixel_list]
buf_no = s.cur[0] - 1
# Compute the correlations between the first level
# (undownsampled) frames. This modifies G,
# past_intensity, future_intensity,
# and img_per_level in place!
_one_time_process(s.buf, s.G, s.past_intensity, s.future_intensity,
s.label_array, num_bufs, s.num_pixels,
s.img_per_level, level, buf_no, s.norm, s.lev_len)
# check whether the number of levels is one, otherwise
# continue processing the next level
processing = num_levels > 1
level = 1
while processing:
if not s.track_level[level]:
s.track_level[level] = True
processing = False
else:
prev = (1 + (s.cur[level - 1] - 2) % num_bufs)
s.cur[level] = (
1 + s.cur[level] % num_bufs)
s.buf[level, s.cur[level] - 1] = ((
s.buf[level - 1, prev - 1] +
s.buf[level - 1, s.cur[level - 1] - 1]) / 2)
# make the track_level zero once that level is processed
s.track_level[level] = False
# call processing_func for each multi-tau level greater
# than one. This is modifying things in place. See comment
# on previous call above.
buf_no = s.cur[level] - 1
_one_time_process(s.buf, s.G, s.past_intensity,
s.future_intensity, s.label_array, num_bufs,
s.num_pixels, s.img_per_level, level, buf_no,
s.norm, s.lev_len)
level += 1
# Checking whether there is next level for processing
processing = level < num_levels
# If any past intensities are zero, then g2 cannot be normalized at
# those levels. This if/else code block is basically preventing
# divide-by-zero errors.
if len(np.where(s.past_intensity == 0)[0]) != 0:
g_max = np.where(s.past_intensity == 0)[0][0]
else:
g_max = s.past_intensity.shape[0]
g2 = (s.G[:g_max] / (s.past_intensity[:g_max] *
s.future_intensity[:g_max]))
yield results(g2, s.lag_steps[:g_max], s)
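# Illustrative usage sketch (not part of the original module): consuming the
# generator above frame by frame. The synthetic ``frames`` generator, the
# 16x16 frame shape and the single-ROI ``labels`` array are assumptions made
# only for this example; any iterable of 2D arrays works the same way.
def _example_lazy_one_time():
    frames = (np.random.poisson(5, size=(16, 16)) for _ in range(64))
    labels = np.ones((16, 16), dtype=np.int64)  # one ROI covering every pixel
    result = None
    for result in lazy_one_time(frames, num_levels=4, num_bufs=8,
                                labels=labels):
        pass  # ``result.g2`` is refined as every new frame is processed
    return result.g2, result.lag_steps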
def multi_tau_auto_corr(num_levels, num_bufs, labels, images):
"""Wraps generator implementation of multi-tau
Original code(in Yorick) for multi tau auto correlation
author: Mark Sutton
For parameter description, please reference the docstring for
lazy_one_time. Note that there is an API difference between this function
    and `lazy_one_time`. The `images` argument is at the end of this function
signature here for backwards compatibility, but is the first argument in
the `lazy_one_time()` function. The semantics of the variables remain
unchanged.
"""
gen = lazy_one_time(images, num_levels, num_bufs, labels)
for result in gen:
pass
return result.g2, result.lag_steps
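# Sketch of the equivalent one-shot call through the wrapper above. The
# ``frames`` iterable and ``labels`` array are assumed inputs of the same
# form as in the previous sketch; note the argument order difference.
def _example_multi_tau_auto_corr(frames, labels):
    g2, lag_steps = multi_tau_auto_corr(4, 8, labels, frames)
    return g2, lag_steps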
def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1):
"""
This model will provide normalized intensity-intensity time
correlation data to be minimized.
Parameters
----------
lags : array
delay time
beta : float
optical contrast (speckle contrast), a sample-independent
beamline parameter
relaxation_rate : float
        relaxation rate associated with the sample's dynamics.
baseline : float, optional
baseline of one time correlation
equal to one for ergodic samples
Returns
-------
g2 : array
        normalized intensity-intensity time autocorrelation
    Notes
    -----
The intensity-intensity autocorrelation g2 is connected to the intermediate
scattering factor(ISF) g1
.. math::
g_2(q, \\tau) = \\beta_1[g_1(q, \\tau)]^{2} + g_\infty
For a system undergoing diffusive dynamics,
.. math::
g_1(q, \\tau) = e^{-\gamma(q) \\tau}
.. math::
g_2(q, \\tau) = \\beta_1 e^{-2\gamma(q) \\tau} + g_\infty
    This implementation is based on published work. [1]_
References
----------
.. [1] L. Li, P. Kwasniewski, D. Orsi, L. Wiegart, L. Cristofolini,
C. Caronna and A. Fluerasu, " Photon statistics and speckle
visibility spectroscopy with partially coherent X-rays,"
J. Synchrotron Rad. vol 21, p 1288-1295, 2014
"""
return beta * np.exp(-2 * relaxation_rate * lags) + baseline
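# Illustrative sketch: evaluating the diffusive model above on a grid of
# delay times. The contrast, relaxation rate and lag values are arbitrary
# numbers chosen only to show the call signature.
def _example_auto_corr_scat_factor():
    lags = np.arange(1, 100)
    return auto_corr_scat_factor(lags, beta=0.2, relaxation_rate=6.3e-3)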
def two_time_corr(labels, images, num_frames, num_bufs, num_levels=1):
"""Wraps generator implementation of multi-tau two time correlation
This function computes two-time correlation
Original code : author: Yugang Zhang
Returns
-------
results : namedtuple
For parameter definition, see the docstring for the `lazy_two_time()`
function in this module
"""
gen = lazy_two_time(labels, images, num_frames, num_bufs, num_levels)
for result in gen:
pass
return two_time_state_to_results(result)
def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1,
two_time_internal_state=None):
"""Generator implementation of two-time correlation
If you do not want multi-tau correlation, set num_levels to 1 and
num_bufs to the number of images you wish to correlate
Multi-tau correlation uses a scheme to achieve long-time correlations
inexpensively by downsampling the data, iteratively combining successive
frames.
The longest lag time computed is ``num_levels * num_bufs``.
See Also
--------
comments on `multi_tau_auto_corr`
Parameters
----------
labels : array
labeled array of the same shape as the image stack;
each ROI is represented by a distinct label (i.e., integer)
images : iterable of 2D arrays
dimensions are: (rr, cc), iterable of 2D arrays
num_frames : int
number of images to use
default is number of images
num_bufs : int, must be even
maximum lag step to compute in each generation of
downsampling
num_levels : int, optional
how many generations of downsampling to perform, i.e.,
the depth of the binomial tree of averaged frames
default is one
Yields
------
namedtuple
        The internal state is yielded after every image has been processed.
        Passing the final state to ``two_time_state_to_results`` gives a
        ``results`` object, which contains, in this order:
        - ``g2``: the normalized correlation
          shape is (num_rois, len(lag_steps), len(lag_steps))
        - ``lag_steps``: the times at which the correlation was computed
        - ``_internal_state``: all of the internal state. Can be passed back in
          to ``lazy_two_time`` as the ``two_time_internal_state`` parameter
Notes
-----
The two-time correlation function is defined as
.. math::
C(q,t_1,t_2) = \\frac{<I(q,t_1)I(q,t_2)>}{<I(q, t_1)><I(q,t_2)>}
    Here, the ensemble averages are performed over many pixels of the detector,
    all having the same ``q`` value. The average time, or age, is equal to
    ``(t1 + t2)/2``, measured by the distance along the ``t1 = t2`` diagonal.
    The time difference ``t = |t1 - t2|`` is the distance from the
    ``t1 = t2`` diagonal, measured in the perpendicular direction.
    In an equilibrium system, the two-time correlation functions depend only
    on the time difference ``t``, and hence the two-time correlation contour
    lines are parallel.
[1]_
References
----------
    .. [1] A. Fluerasu, A. Moussaid, A. Madsen and A. Schofield,
"Slow dynamics and aging in collodial gels studied by x-ray
photon correlation spectroscopy," Phys. Rev. E., vol 76, p
010401(1-4), 2007.
"""
if two_time_internal_state is None:
two_time_internal_state = _init_state_two_time(num_levels, num_bufs,
labels, num_frames)
# create a shorthand reference to the results and state named tuple
s = two_time_internal_state
for img in images:
s.cur[0] = (1 + s.cur[0]) % num_bufs # increment buffer
s.count_level[0] = 1 + s.count_level[0]
# get the current image time
s = s._replace(current_img_time=(s.current_img_time + 1))
# Put the image into the ring buffer.
s.buf[0, s.cur[0] - 1] = (np.ravel(img))[s.pixel_list]
#print( np.sum( s.buf[0, s.cur[0] - 1] ) )
        # Compute the two time correlations between the first level
        # (undownsampled) frames. This modifies g2 and img_per_level in place!
_two_time_process(s.buf, s.g2, s.label_array, num_bufs,
s.num_pixels, s.img_per_level, s.lag_steps,
s.current_img_time,
level=0, buf_no=s.cur[0] - 1)
# time frame for each level
s.time_ind[0].append(s.current_img_time)
# check whether the number of levels is one, otherwise
# continue processing the next level
processing = num_levels > 1
# Compute the correlations for all higher levels.
level = 1
while processing:
if not s.track_level[level]:
s.track_level[level] = 1
processing = False
else:
prev = 1 + (s.cur[level - 1] - 2) % num_bufs
s.cur[level] = 1 + s.cur[level] % num_bufs
s.count_level[level] = 1 + s.count_level[level]
s.buf[level, s.cur[level] - 1] = (s.buf[level - 1, prev - 1] +
s.buf[level - 1,
s.cur[level - 1] - 1])/2
t1_idx = (s.count_level[level] - 1) * 2
current_img_time = ((s.time_ind[level - 1])[t1_idx] +
(s.time_ind[level - 1])[t1_idx + 1])/2.
# time frame for each level
s.time_ind[level].append(current_img_time)
# make the track_level zero once that level is processed
s.track_level[level] = 0
# call the _two_time_process function for each multi-tau level
# for multi-tau levels greater than one
# Again, this is modifying things in place. See comment
# on previous call above.
_two_time_process(s.buf, s.g2, s.label_array, num_bufs,
s.num_pixels, s.img_per_level, s.lag_steps,
current_img_time,
level=level, buf_no=s.cur[level]-1)
level += 1
# Checking whether there is next level for processing
processing = level < num_levels
yield s
def two_time_state_to_results(state):
"""Convert the internal state of the two time generator into usable results
Parameters
----------
state : namedtuple
The internal state that is yielded from `lazy_two_time`
Returns
-------
results : namedtuple
A results object that contains the two time correlation results
and the lag steps
"""
for q in range(np.max(state.label_array)):
x0 = (state.g2)[q, :, :]
(state.g2)[q, :, :] = (np.tril(x0) + np.tril(x0).T -
np.diag(np.diag(x0)))
return results(state.g2, state.lag_steps, state)
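# Illustrative sketch (not part of the original module): running the lazy
# two-time correlation and converting the final internal state into results.
# The synthetic frame stack and the single-ROI ``labels`` array are
# assumptions made only for this example.
def _example_lazy_two_time():
    num_frames = 32
    frames = [np.random.poisson(5, size=(16, 16)) for _ in range(num_frames)]
    labels = np.ones((16, 16), dtype=np.int64)
    state = None
    for state in lazy_two_time(labels, frames, num_frames,
                               num_bufs=8, num_levels=1):
        pass
    return two_time_state_to_results(state)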
def _two_time_process(buf, g2, label_array, num_bufs, num_pixels,
img_per_level, lag_steps, current_img_time,
level, buf_no):
"""
Parameters
----------
buf: array
image data array to use for two time correlation
g2: array
two time correlation matrix
shape (number of labels(ROI), number of frames, number of frames)
label_array: array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, etc. corresponding to the order they are specified
in edges and segments
num_bufs: int, even
number of buffers(channels)
num_pixels : array
        number of pixels in certain ROI's,
        dimensions are len(np.unique(label_array))
img_per_level: array
to track how many images processed in each level
lag_steps : array
delay or lag steps for the multiple tau analysis
shape num_levels
current_img_time : int
the current image number
level : int
the current multi-tau level
buf_no : int
the current buffer number
"""
img_per_level[level] += 1
    # in multi-tau correlation, all levels other than the first
    # only have to do half of the correlation
if level == 0:
i_min = 0
else:
i_min = num_bufs//2
for i in range(i_min, min(img_per_level[level], num_bufs)):
        t_index = level*num_bufs//2 + i
delay_no = (buf_no - i) % num_bufs
past_img = buf[level, delay_no]
future_img = buf[level, buf_no]
# get the matrix of correlation function without normalizations
tmp_binned = (np.bincount(label_array,
weights=past_img*future_img)[1:])
# get the matrix of past intensity normalizations
pi_binned = (np.bincount(label_array,
weights=past_img)[1:])
# get the matrix of future intensity normalizations
fi_binned = (np.bincount(label_array,
weights=future_img)[1:])
tind1 = (current_img_time - 1)
tind2 = (current_img_time - lag_steps[t_index] - 1)
#print( current_img_time )
if not isinstance(current_img_time, int):
nshift = 2**(level-1)
for i in range(-nshift+1, nshift+1):
g2[:, int(tind1+i),
int(tind2+i)] = (tmp_binned/(pi_binned *
fi_binned))*num_pixels
else:
g2[:, tind1, tind2] = tmp_binned/(pi_binned * fi_binned)*num_pixels
def _init_state_two_time(num_levels, num_bufs, labels, num_frames):
"""Initialize a stateful namedtuple for two time correlation
Parameters
----------
num_levels : int
num_bufs : int
labels : array
Two dimensional labeled array that contains ROI information
num_frames : int
number of images to use
default is number of images
Returns
-------
internal_state : namedtuple
The namedtuple that contains all the state information that
`lazy_two_time` requires so that it can be used to pick up processing
after it was interrupted
"""
(label_array, pixel_list, num_rois, num_pixels, lag_steps,
buf, img_per_level, track_level, cur, norm,
lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels)
# to count images in each level
count_level = np.zeros(num_levels, dtype=np.int64)
# current image time
current_img_time = 0
# generate a time frame for each level
time_ind = {key: [] for key in range(num_levels)}
# two time correlation results (array)
g2 = np.zeros((num_rois, num_frames, num_frames), dtype=np.float64)
return _two_time_internal_state(
buf,
img_per_level,
label_array,
track_level,
cur,
pixel_list,
num_pixels,
lag_steps,
g2,
count_level,
current_img_time,
time_ind,
norm,
lev_len,
)
def _validate_and_transform_inputs(num_bufs, num_levels, labels):
"""
This is a helper function to validate inputs and create initial state
inputs for both one time and two time correlation
Parameters
----------
num_bufs : int
num_levels : int
labels : array
labeled array of the same shape as the image stack;
each ROI is represented by a distinct label (i.e., integer)
Returns
-------
label_array : array
labels of the required region of interests(ROI's)
pixel_list : array
1D array of indices into the raveled image for all
foreground pixels (labeled nonzero)
e.g., [5, 6, 7, 8, 14, 15, 21, 22]
num_rois : int
number of region of interests (ROI)
num_pixels : array
number of pixels in each ROI
lag_steps : array
the times at which the correlation was computed
buf : array
image data for correlation
img_per_level : array
to track how many images processed in each level
track_level : array
to track processing each level
cur : array
to increment the buffer
norm : dict
to track bad images
lev_len : array
length of each levels
"""
if num_bufs % 2 != 0:
raise ValueError("There must be an even number of `num_bufs`. You "
"provided %s" % num_bufs)
label_array, pixel_list = extract_label_indices(labels)
# map the indices onto a sequential list of integers starting at 1
label_mapping = {label: n+1
for n, label in enumerate(np.unique(label_array))}
# remap the label array to go from 1 -> max(_labels)
for label, n in label_mapping.items():
label_array[label_array == label] = n
# number of ROI's
num_rois = len(label_mapping)
# stash the number of pixels in the mask
num_pixels = np.bincount(label_array)[1:]
# Convert from num_levels, num_bufs to lag frames.
tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs)
# these norm and lev_len will help to find the one time correlation
    # normalization; norm will be updated when there is a bad image
norm = {key: [0] * len(dict_lag[key]) for key in (dict_lag.keys())}
lev_len = np.array([len(dict_lag[i]) for i in (dict_lag.keys())])
# Ring buffer, a buffer with periodic boundary conditions.
    # Images must be kept for up to the maximum delay in buf.
buf = np.zeros((num_levels, num_bufs, len(pixel_list)),
dtype=np.float64)
# to track how many images processed in each level
img_per_level = np.zeros(num_levels, dtype=np.int64)
# to track which levels have already been processed
track_level = np.zeros(num_levels, dtype=bool)
# to increment buffer
cur = np.ones(num_levels, dtype=np.int64)
return (label_array, pixel_list, num_rois, num_pixels,
lag_steps, buf, img_per_level, track_level, cur,
norm, lev_len)
def one_time_from_two_time(two_time_corr):
"""
This will provide the one-time correlation data from two-time
correlation data.
Parameters
----------
two_time_corr : array
matrix of two time correlation
shape (number of labels(ROI's), number of frames, number of frames)
Returns
-------
one_time_corr : array
matrix of one time correlation
shape (number of labels(ROI's), number of frames)
"""
one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2]))
    for i, g in enumerate(two_time_corr):
        for j in range(two_time_corr.shape[2]):
            one_time_corr[i, j] = np.trace(g, offset=j)/two_time_corr.shape[2]
return one_time_corr
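# Illustrative sketch: extracting the one-time correlation from a two-time
# matrix computed with ``two_time_corr``. The synthetic frames and the
# single-ROI ``labels`` array are assumptions made only for this example.
def _example_one_time_from_two_time():
    frames = [np.random.poisson(5, size=(8, 8)) for _ in range(16)]
    labels = np.ones((8, 8), dtype=np.int64)
    two_time = two_time_corr(labels, frames, len(frames),
                             num_bufs=8, num_levels=1).g2
    return one_time_from_two_time(two_time)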
class CrossCorrelator:
'''
Compute a 1D or 2D cross-correlation on data.
This uses a mask, which may be binary (array of 0's and 1's),
or a list of non-negative integer id's to compute cross-correlations
separately on.
The symmetric averaging scheme introduced here is inspired by a paper
from Schätzel, although the implementation is novel in that it
allows for the usage of arbitrary masks. [1]_
Examples
--------
    >> cc = CrossCorrelator(mask.shape, mask=mask)
    >> # correlated image
    >> cimg = cc(img1)
    or, mask may be an array of integer ids:
    >> cc = CrossCorrelator(ids.shape, mask=ids)
    # (where ids is same shape as img1)
>> cc1 = cc(img1)
>> cc12 = cc(img1, img2)
# if img2 shifts right of img1, point of maximum correlation is shifted
# right from correlation center
References
----------
.. [1] Schätzel, Klaus, Martin Drewel, and Sven Stimac. “Photon
correlation measurements at large lag times: improving
statistical accuracy.” Journal of Modern Optics 35.4 (1988):
711-718.
'''
# TODO : when mask is None, don't compute a mask, submasks
def __init__(self, shape, mask=None, normalization=None,
wrap=False):
'''
Prepare the spatial correlator for various regions specified by the
id's in the image.
Parameters
----------
shape : 1 or 2-tuple
The shape of the incoming images or curves. May specify 1D or
2D shapes by inputting a 1 or 2-tuple
mask : 1D or 2D np.ndarray of int, optional
Each non-zero integer represents unique bin. Zero integers are
assumed to be ignored regions. If None, creates a mask with
all points set to 1
normalization: string or list of strings, optional
These specify the normalization and may be any of the
following:
'regular' : divide by pixel number
'symavg' : use symmetric averaging
Defaults to ['regular'] normalization
wrap : bool, optional
If False, assume dimensions don't wrap around. If True
assume they do. The latter is useful for circular
dimensions such as angle.
'''
if normalization is None:
normalization = ['regular']
elif not isinstance(normalization, list):
normalization = list([normalization])
self.wrap = wrap
self.normalization = normalization
if mask is None:
mask = np.ones(shape)
# the IDs for the image, called mask
self.mask = mask
# initialize all the masks for the correlation
# Making a list of arrays holding the masks for each id. Ideally, mask
# is binary so this is one element to quickly index original images
self.pxlsts = list()
self.submasks = list()
# to quickly index the sub images
self.subpxlsts = list()
# the temporary images (double the size for the cross correlation)
self.tmpimgs = list()
self.tmpimgs2 = list()
self.centers = list()
self.shapes = list() # the shapes of each correlation
# the positions of each axes of each correlation
self.positions = list()
self.ids = np.sort(np.unique(mask))
# remove the zero since we ignore, but only if it is there (sometimes
# may not be)
if self.ids[0] == 0:
self.ids = self.ids[1:]
self.nids = len(self.ids)
self.maskcorrs = list()
# regions where the correlations are not zero
self.pxlst_maskcorrs = list()
# basically saving bunch of mask related stuff like indexing etc, just
# to save some time when actually computing the cross correlations
for idno in self.ids:
masktmp = (mask == idno)
self.pxlsts.append(np.where(masktmp.ravel() == 1)[0])
# this could be replaced by skimage cropping and padding
submasktmp = _crop_from_mask(masktmp)
            if self.wrap is False:
                submask = _expand_image(submasktmp)
            else:
                submask = submasktmp
tmpimg = np.zeros_like(submask)
self.submasks.append(submask)
self.subpxlsts.append(np.where(submask.ravel() == 1)[0])
self.tmpimgs.append(tmpimg)
# make sure it's a copy and not a ref
self.tmpimgs2.append(tmpimg.copy())
maskcorr = _cross_corr(submask)
            # quick fix: the overlap counts should be integers, so threshold
            # at a small value to remove floating point noise
maskcorr *= maskcorr > .5
self.maskcorrs.append(maskcorr)
self.pxlst_maskcorrs.append(maskcorr > 0)
# centers are shape//2 as performed by fftshift
center = np.array(maskcorr.shape)//2
self.centers.append(np.array(maskcorr.shape)//2)
self.shapes.append(np.array(maskcorr.shape))
if mask.ndim == 1:
self.positions.append(np.arange(maskcorr.shape[0]) - center[0])
elif mask.ndim == 2:
self.positions.append([np.arange(maskcorr.shape[0]) -
center[0],
np.arange(maskcorr.shape[1]) -
center[1]])
if len(self.ids) == 1:
self.positions = self.positions[0]
self.centers = self.centers[0]
self.shapes = self.shapes[0]
def __call__(self, img1, img2=None, normalization=None):
''' Run the cross correlation on an image/curve or against two
images/curves
Parameters
----------
img1 : 1D or 2D np.ndarray
The image (or curve) to run the cross correlation on
img2 : 1D or 2D np.ndarray
If not set to None, run cross correlation of this image (or
curve) against img1. Default is None.
normalization : string or list of strings
normalization types. If not set, use internally saved
normalization parameters
Returns
-------
ccorrs : 1d or 2d np.ndarray
An image of the correlation. The zero correlation is
located at shape//2 where shape is the 1 or 2-tuple
shape of the array
'''
if normalization is None:
normalization = self.normalization
if img2 is None:
self_correlation = True
img2 = img1
else:
self_correlation = False
ccorrs = list()
rngiter = tqdm(range(self.nids))
for i in rngiter:
self.tmpimgs[i] *= 0
self.tmpimgs[i].ravel()[
self.subpxlsts[i]
] = img1.ravel()[self.pxlsts[i]]
if not self_correlation:
self.tmpimgs2[i] *= 0
self.tmpimgs2[i].ravel()[
self.subpxlsts[i]
] = img2.ravel()[self.pxlsts[i]]
# multiply by maskcorrs > 0 to ignore invalid regions
if self_correlation:
ccorr = _cross_corr(self.tmpimgs[i])*(self.maskcorrs[i] > 0)
else:
ccorr = _cross_corr(self.tmpimgs[i], self.tmpimgs2[i]) * \
(self.maskcorrs[i] > 0)
# now handle the normalizations
if 'symavg' in normalization:
# do symmetric averaging
Icorr = _cross_corr(self.tmpimgs[i] *
self.submasks[i], self.submasks[i])
if self_correlation:
Icorr2 = _cross_corr(self.submasks[i], self.tmpimgs[i] *
self.submasks[i])
else:
Icorr2 = _cross_corr(self.submasks[i], self.tmpimgs2[i] *
self.submasks[i])
# there is an extra condition that Icorr*Icorr2 != 0
w = np.where(np.abs(Icorr*Icorr2) > 0)
ccorr[w] *= self.maskcorrs[i][w]/Icorr[w]/Icorr2[w]
if 'regular' in normalization:
# only run on overlapping regions for correlation
w = self.pxlst_maskcorrs[i]
ccorr[w] /= self.maskcorrs[i][w] * \
np.average(self.tmpimgs[i].
ravel()[self.subpxlsts[i]])**2
ccorrs.append(ccorr)
if len(ccorrs) == 1:
ccorrs = ccorrs[0]
return ccorrs
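# Illustrative usage sketch (not part of the original module): spatially
# autocorrelating one image inside a single rectangular ROI with symmetric
# averaging. The image size, mask layout and normalization choice are
# arbitrary assumptions made only for this example.
def _example_cross_correlator():
    mask = np.zeros((64, 64), dtype=np.int64)
    mask[16:48, 16:48] = 1                  # one rectangular region of interest
    cc = CrossCorrelator(mask.shape, mask=mask, normalization='symavg')
    img = np.random.poisson(5, size=(64, 64)).astype(np.float64)
    return cc(img)                          # autocorrelation image of ``img``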
def _cross_corr(img1, img2=None):
''' Compute the cross correlation of one (or two) images.
Parameters
----------
img1 : np.ndarray
the image or curve to cross correlate
img2 : 1d or 2d np.ndarray, optional
If set, cross correlate img1 against img2. A shift of img2
to the right of img1 will lead to a shift of the point of
highest correlation to the right.
Default is set to None
'''
ndim = img1.ndim
if img2 is None:
img2 = img1
if img1.shape != img2.shape:
errorstr = "Image shapes don't match. "
errorstr += "(img1 : {},{}; img2 : {},{})"\
.format(*img1.shape, *img2.shape)
raise ValueError(errorstr)
# need to reverse indices for second image
# fftconvolve(A,B) = FFT^(-1)(FFT(A)*FFT(B))
# but need FFT^(-1)(FFT(A(x))*conj(FFT(B(x)))) = FFT^(-1)(A(x)*B(-x))
    reverse_index = tuple(slice(None, None, -1) for i in range(ndim))
imgc = fftconvolve(img1, img2[reverse_index], mode='same')
return imgc
def _crop_from_mask(mask):
'''
Crop an image from a given mask
Parameters
----------
mask : 1d or 2d np.ndarray
The data to be cropped. This consists of integers >=0.
        Regions with 0 are masked and regions > 0 are kept.
Returns
-------
mask : 1d or 2d np.ndarray
The cropped image. This image is cropped as much as possible
without losing unmasked data.
'''
dims = mask.shape
pxlst = np.where(mask.ravel() != 0)[0]
# this is the assumed width along the fastest-varying dimension
if len(dims) > 1:
imgwidth = dims[1]
else:
imgwidth = 1
    # A[row, col] in matrix notation: pixelx below indexes rows and
    # pixely indexes columns of the raveled image
pixely = pxlst % imgwidth
pixelx = pxlst//imgwidth
minpixelx = np.min(pixelx)
minpixely = np.min(pixely)
maxpixelx = np.max(pixelx)
maxpixely = np.max(pixely)
oldimg = np.zeros(dims)
oldimg.ravel()[pxlst] = 1
if len(dims) > 1:
mask = np.copy(oldimg[minpixelx:maxpixelx+1, minpixely:maxpixely+1])
else:
mask = np.copy(oldimg[minpixelx:maxpixelx+1])
return mask
def _expand_image(img):
''' Convenience routine to make an image with twice the size, plus one.
Parameters
----------
img : 1d or 2d np.ndarray
The image (or curve) to expand
Returns
-------
img : 1d or 2d np.ndarray
The expanded image
'''
imgold = img
dims = imgold.shape
if len(dims) > 1:
img = np.zeros((dims[0]*2+1, dims[1]*2+1))
img[:dims[0], :dims[1]] = imgold
else:
img = np.zeros((dims[0]*2+1))
img[:dims[0]] = imgold
return img | bsd-3-clause | 5,104,684,623,473,763,000 | 37.089189 | 79 | 0.573135 | false |
LLNL/spack | var/spack/repos/builtin/packages/neuron/package.py | 2 | 7706 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Neuron(Package):
"""NEURON is a simulation environment for single and networks of neurons.
NEURON is a simulation environment for modeling individual and networks of
neurons. NEURON models individual neurons via the use of sections that are
automatically subdivided into individual compartments, instead of
requiring the user to manually create compartments. The primary scripting
language is hoc but a Python interface is also available.
"""
homepage = "https://www.neuron.yale.edu/"
url = "http://www.neuron.yale.edu/ftp/neuron/versions/v7.5/nrn-7.5.tar.gz"
git = "https://github.com/nrnhines/nrn.git"
version('develop', branch='master')
version('7.5', sha256='67642216a969fdc844da1bd56643edeed5e9f9ab8c2a3049dcbcbcccba29c336')
version('7.4', sha256='1403ba16b2b329d2376f4bf007d96e6bf2992fa850f137f1068ad5b22b432de6')
version('7.3', sha256='71cff5962966c5cd5d685d90569598a17b4b579d342126b31e2d431128cc8832')
version('7.2', sha256='c777d73a58ff17a073e8ea25f140cb603b8b5f0df3c361388af7175e44d85b0e')
variant('mpi', default=True, description='Enable MPI parallelism')
variant('python', default=True, description='Enable python')
variant('shared', default=False, description='Build shared libraries')
variant('cross-compile', default=False, description='Build for cross-compile environment')
variant('multisend', default=True, description="Enable multi-send spike exchange")
variant('rx3d', default=False, description="Enable cython translated 3-d rxd")
depends_on('flex', type='build')
depends_on('bison', type='build')
    depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('pkgconfig', type='build')
depends_on('mpi', when='+mpi')
depends_on('[email protected]:', when='+python')
depends_on('ncurses', when='~cross-compile')
conflicts('~shared', when='+python')
filter_compiler_wrappers('*/bin/nrniv_makefile')
def get_neuron_archdir(self):
"""Determine the architecture-specific neuron base directory.
Instead of recreating the logic of the neuron's configure
we dynamically find the architecture-specific directory by
looking for a specific binary.
"""
file_list = find(self.prefix, '*/bin/nrniv_makefile')
# check needed as when initially evaluated the prefix is empty
if file_list:
neuron_archdir = os.path.dirname(os.path.dirname(file_list[0]))
else:
neuron_archdir = self.prefix
return neuron_archdir
def patch(self):
# aclocal need complete include path (especially on os x)
pkgconf_inc = '-I %s/share/aclocal/' % (self.spec['pkgconfig'].prefix)
libtool_inc = '-I %s/share/aclocal/' % (self.spec['libtool'].prefix)
newpath = 'aclocal -I m4 %s %s' % (pkgconf_inc, libtool_inc)
filter_file(r'aclocal -I m4', r'%s' % newpath, "build.sh")
def get_arch_options(self, spec):
options = []
if spec.satisfies('+cross-compile'):
options.extend(['cross_compiling=yes',
'--without-memacs',
'--without-nmodl'])
# need to enable bg-q arch
if 'bgq' in self.spec.architecture:
options.extend(['--enable-bluegeneQ',
'--host=powerpc64'])
# on os-x disable building carbon 'click' utility
if 'darwin' in self.spec.architecture:
options.append('macdarwin=no')
return options
def get_python_options(self, spec):
options = []
if spec.satisfies('+python'):
python_exec = spec['python'].command.path
py_inc = spec['python'].headers.directories[0]
py_lib = spec['python'].prefix.lib
if not os.path.isdir(py_lib):
py_lib = spec['python'].prefix.lib64
options.extend(['--with-nrnpython=%s' % python_exec,
'--disable-pysetup',
'PYINCDIR=%s' % py_inc,
'PYLIBDIR=%s' % py_lib])
if spec.satisfies('~cross-compile'):
options.append('PYTHON_BLD=%s' % python_exec)
else:
options.append('--without-nrnpython')
return options
def get_compiler_options(self, spec):
flags = '-O2 -g'
if 'bgq' in self.spec.architecture:
flags = '-O3 -qtune=qp -qarch=qp -q64 -qstrict -qnohot -g'
if self.spec.satisfies('%pgi'):
flags += ' ' + self.compiler.cc_pic_flag
return ['CFLAGS=%s' % flags,
'CXXFLAGS=%s' % flags]
def build_nmodl(self, spec, prefix):
# build components for front-end arch in cross compiling environment
options = ['--prefix=%s' % prefix,
'--with-nmodl-only',
'--without-x']
if 'bgq' in self.spec.architecture:
flags = '-qarch=ppc64'
options.extend(['CFLAGS=%s' % flags,
'CXXFLAGS=%s' % flags])
if 'cray' in self.spec.architecture:
flags = '-target-cpu=x86_64 -target-network=none'
options.extend(['CFLAGS=%s' % flags,
'CXXFLAGS=%s' % flags])
configure = Executable(join_path(self.stage.source_path, 'configure'))
configure(*options)
make()
make('install')
def install(self, spec, prefix):
options = ['--prefix=%s' % prefix,
'--without-iv',
'--without-x',
'--without-readline']
if spec.satisfies('+multisend'):
options.append('--with-multisend')
if spec.satisfies('~rx3d'):
options.append('--disable-rx3d')
if spec.satisfies('+mpi'):
options.extend(['MPICC=%s' % spec['mpi'].mpicc,
'MPICXX=%s' % spec['mpi'].mpicxx,
'--with-paranrn'])
else:
options.append('--without-paranrn')
if spec.satisfies('~shared'):
options.extend(['--disable-shared',
'linux_nrnmech=no'])
options.extend(self.get_arch_options(spec))
options.extend(self.get_python_options(spec))
options.extend(self.get_compiler_options(spec))
build = Executable('./build.sh')
build()
with working_dir('build', create=True):
if spec.satisfies('+cross-compile'):
self.build_nmodl(spec, prefix)
srcpath = self.stage.source_path
configure = Executable(join_path(srcpath, 'configure'))
configure(*options)
make('VERBOSE=1')
make('install')
def setup_run_environment(self, env):
neuron_archdir = self.get_neuron_archdir()
env.prepend_path('PATH', join_path(neuron_archdir, 'bin'))
env.prepend_path('LD_LIBRARY_PATH', join_path(neuron_archdir, 'lib'))
def setup_dependent_build_environment(self, env, dependent_spec):
neuron_archdir = self.get_neuron_archdir()
env.prepend_path('PATH', join_path(neuron_archdir, 'bin'))
env.prepend_path('LD_LIBRARY_PATH', join_path(neuron_archdir, 'lib'))
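    # Illustrative spec examples (not part of the original recipe): the
    # variants declared above are typically exercised with command lines
    # such as
    #   spack install neuron@7.5 +mpi +python +shared
    #   spack install neuron@7.4 ~python ~rx3d
    # (note that ``+python`` requires ``+shared`` because of the conflict
    # declared above).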
| lgpl-2.1 | 5,630,564,554,303,368,000 | 37.53 | 94 | 0.590449 | false |
juju/python-libjuju | examples/config.py | 1 | 1244 | """
This example:
1. Connects to the current model
2. Deploys a charm
3. Updates and verifies its config and constraints
"""
import logging
from juju.model import Model
from juju import loop
log = logging.getLogger(__name__)
MB = 1
async def main():
model = Model()
# connect to current model with current user, per Juju CLI
await model.connect()
ubuntu_app = await model.deploy(
'cs:mysql',
application_name='mysql',
series='trusty',
channel='stable',
config={
'tuning-level': 'safest',
},
constraints={
'mem': 256 * MB,
},
)
# update and check app config
await ubuntu_app.set_config({'tuning-level': 'fast'})
config = await ubuntu_app.get_config()
assert(config['tuning-level']['value'] == 'fast')
# update and check app constraints
await ubuntu_app.set_constraints({'mem': 512 * MB})
constraints = await ubuntu_app.get_constraints()
assert(constraints['mem'] == 512 * MB)
await model.disconnect()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
ws_logger = logging.getLogger('websockets.protocol')
ws_logger.setLevel(logging.INFO)
loop.run(main())
| apache-2.0 | -6,192,165,979,067,351,000 | 22.037037 | 62 | 0.620579 | false |
robertbarrett/eventsottawabot | eventsOttawaBot.py | 1 | 7649 | #!/bin/python
import praw
import re
import os
import sqlite3
import time
from config_bot import *
import datetime
header = "Type|Artist(s)/Event|Venue|Price|Time\n---|---|---|---|---|---|---|\n"
footer1 = "------------------------\n[Link to next week's draft post]("
footer2 = "). If you know of something awesome happening, let us know here\n\nErrors and Omissions ABSOLUTELY expected. If you see something wrong, or have any suggestions on how we can make this better, let us know on /r/eventsottawa. This is new, so if you have any suggestions on improving the format, please share\n\nIf you know of any other events: just comment below, I'll try to keep the table updated."
nextLink = "https://www.reddit.com/r/eventsottawa/comments/4fvkrv/58_510_draft_thread/"
loggerLevel = 0
dbName = "eventsOttawa.db"
def isEntry(comment):
commentLines = comment.body.split('\n')
if (len(commentLines) >= 11 and commentLines[0].startswith("Type:")
and commentLines[2].startswith("Event:") and commentLines[4].startswith("Link:")
and commentLines[6].startswith("Venue:") and commentLines[8].startswith("Price:")
and commentLines[10].startswith("Time:")):
return True
else:
return False
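# Illustrative note (an assumption inferred from the index checks above, not
# part of the original script): isEntry() expects reddit comments whose split
# lines alternate between fields and blanks, e.g.
#
#   Type: Concert
#
#   Event: Some Band
#
#   Link: http://example.com/show
#
#   Venue: Some Venue
#
#   Price: $10
#
#   Time: 8pm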
def IsPostInDB(endTime):
con = sqlite3.connect(dbName, detect_types=sqlite3.PARSE_DECLTYPES)
with con:
cur = con.cursor()
cur.execute("SELECT count(*) FROM Posts WHERE Posts.EndTime='" + endTime + "';")
if (cur.fetchone()[0] == 0):
return False
else:
return True
# how can i con.close here?
# TODO: TEST
def isEventInDB(commentID):
con = sqlite3.connect(dbName, detect_types=sqlite3.PARSE_DECLTYPES)
with con:
cur = con.cursor()
cur.execute("SELECT count(*) FROM Events WHERE Events.CommentID='" + commentID + "';")
if (cur.fetchone()[0] == 0):
return False
else:
return True
def getTableLine(comment):
commentLines = comment.body.split('\n')
type = re.sub('[|]', '', commentLines[0][5:].lstrip())
event = re.sub('[|]', '', commentLines[2][6:].lstrip())
link = re.sub('[|]', '', commentLines[4][5:].lstrip())
venue = re.sub('[|]', '', commentLines[6][6:].lstrip())
price = re.sub('[|]', '', commentLines[8][6:].lstrip())
time = re.sub('[|]', '', commentLines[10][5:].lstrip())
if link != "":
combEvent = "[" + event + "](" + link + ")"
else:
combEvent = event
seq = (type, combEvent, venue, price, time)
return '|'.join(seq)
def verifyTablesExist():
con = sqlite3.connect(dbName, detect_types=sqlite3.PARSE_DECLTYPES)
with con:
cur = con.cursor()
cur.execute("SELECT count(*) FROM sqlite_master WHERE type='table' and name='Events';")
if (cur.fetchone()[0] == 0):
cur.execute("CREATE TABLE Events(ID INTEGER PRIMARY KEY, PostID TEXT, CommentID TEXT, EventType TEXT, Event TEXT, Link TEXT, Venue TEXT, Price TEXT, EventTime TEXT);")
cur.execute("SELECT count(*) FROM sqlite_master WHERE type='table' and name='Posts';")
if (cur.fetchone()[0] == 0):
cur.execute("CREATE TABLE Posts(ID INTEGER PRIMARY KEY, PostID TEXT, EndTime timestamp, NextPostID TEXT, Whitelist boolean);")
# taken from: http://stackoverflow.com/questions/1829872/how-to-read-datetime-back-from-sqlite-as-a-datetime-instead-of-string-in-python
# TODO: TEST
con.commit()
con.close()
def getDBTableRow(rowName):
if (rowName == "ID"):
return 0
elif (rowName == "PostID"):
return 1
elif (rowName == "CommentID"):
return 2
elif (rowName == "EventType"):
return 3
elif (rowName == "Event"):
return 4
elif (rowName == "Link"):
return 5
elif (rowName == "Venue"):
return 6
elif (rowName == "Price"):
return 7
elif (rowName == "EventTime"):
return 8
def getPostBody(commentID):
textBody = header
con = sqlite3.connect(dbName, detect_types=sqlite3.PARSE_DECLTYPES)
with con:
cur = con.cursor()
cur.execute("SELECT * FROM Events WHERE PostID = '" + commentID + "' ORDER BY EventTime;")
rows = cur.fetchall()
# print rows.keys()
# hmmm, trying to figure out how to use row headings instead of row[0], etc
for row in rows:
if row[4] != "":
combEvent = "[" + row[4] + "](" + row[5] + ")"
else:
combEvent = row[4]
seq = (row[3],combEvent,row[6],row[7],row[8])
textBody = textBody + '|'.join(seq) + '\n'
textBody = textBody + footer1 + nextLink + footer2
con.close()
return textBody
def updateEventTable(comment):
commentLines = comment.body.split('\n')
type = re.sub('[|]', '', commentLines[0][5:].lstrip())
event = re.sub('[|]', '', commentLines[2][6:].lstrip())
link = re.sub('[|]', '', commentLines[4][5:].lstrip())
venue = re.sub('[|]', '', commentLines[6][6:].lstrip())
price = re.sub('[|]', '', commentLines[8][6:].lstrip())
time = re.sub('[|]', '', commentLines[10][5:].lstrip())
con = sqlite3.connect(dbName, detect_types=sqlite3.PARSE_DECLTYPES)
with con:
cur = con.cursor()
cur.execute(
"INSERT INTO Events (PostID, CommentID, EventType, Event, Link, Venue, Price, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
(comment.link_id[3:], comment.id, type, event, link, venue, price, time))
con.commit()
con.close()
def makePost(REDDIT_USERNAME, REDDIT_PASS, user_agent, subreddit, title):
r = praw.Reddit(user_agent=user_agent)
r.login(REDDIT_USERNAME, REDDIT_PASS, disable_warning=True)
r.submit(subreddit, title, text='')
def makeTitle(titleText, startDate, endDate):
return titleText + startDate.strftime("(%b %d - " + endDate.strftime("%b %d)"))
def isAfterThurs4pm(d):
if d.weekday()*24+d.hour < 88: # It is not yet 4pm Thurs. 24 * 3 + 16 = 88
return False
else:
return True
def nextThreeThursday4pms(d):
days_ahead = 3 - d.weekday()
times = []
if isAfterThurs4pm(d):
days_ahead += 7
for x in range(0, 3):
times.append(d.replace(hour=16, minute=0, second=0, microsecond=0) + datetime.timedelta(days_ahead))
days_ahead += 7
return times
#Program code starts here
if not os.path.isfile("config_bot.py"):
print "You must create a config file with your username and password."
exit(1)
# http://pythonforengineers.com/build-a-reddit-bot-part-2-reply-to-posts/
verifyTablesExist()
threadVal = raw_input('Please enter Reddit thread to be watched: ')
user_agent = ("/r/EventsOttawa automation for /u/SergeantAlPowell")
r = praw.Reddit(user_agent=user_agent)
r.login(REDDIT_USERNAME, REDDIT_PASS, disable_warning=True)
while True:
now = datetime.datetime.today()
thursdayFourPM = datetime.datetime(now.year, now.month, now.day, 16, 00) + datetime.timedelta(days=3 - now.weekday())
#if (now > thursdayFourPM) and IsPostInDB(thursdayFourPM):
newEntry = False
submission = r.get_submission(submission_id=threadVal)
submission.replace_more_comments(limit=200, threshold=0)
textBody = header
for comment in submission.comments: # submission.comments are in a forest, so this will only parse the root comment
if (isEntry(comment) and not isEventInDB(comment.id)):
updateEventTable(comment)
newEntry = True
if newEntry:
submission.edit(getPostBody(threadVal))
time.sleep(300)
| mit | 1,773,334,267,225,267,500 | 31.828326 | 410 | 0.615898 | false |
hivesolutions/netius | src/netius/base/agent.py | 1 | 3121 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import threading
from . import legacy
from . import observer
class Agent(observer.Observable):
"""
Top level class for the entry point classes of the multiple
client and server protocol implementations.
These classes should contain a series of utilities that facilitate
the interaction with the Protocol, Event Loop and Transport
objects (with the end developer in mind).
Most of the interaction for a simple protocol should be implemented
using static or class methods, avoiding internal object state and
instantiation of the concrete Agent class.
For complex protocols instantiation may be useful to provided extra
flexibility and context for abstract operations.
"""
@classmethod
def cleanup_s(cls):
pass
def cleanup(self, destroy = True):
if destroy: self.destroy()
def destroy(self):
observer.Observable.destroy(self)
class ClientAgent(Agent):
_clients = dict()
""" The global static clients map meant to be reused by the
various static clients that may be created, this client
may leak creating blocking threads that will prevent the
system from exiting correctly, in order to prevent that
the cleanup method should be called """
@classmethod
def cleanup_s(cls):
super(ClientAgent, cls).cleanup_s()
for client in legacy.itervalues(cls._clients):
client.close()
cls._clients.clear()
@classmethod
def get_client_s(cls, *args, **kwargs):
tid = threading.current_thread().ident
client = cls._clients.get(tid, None)
if client: return client
client = cls(*args, **kwargs)
cls._clients[tid] = client
return client
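    # Illustrative sketch (not part of the original file): a concrete client
    # class built on top of this one would typically be obtained through
    # ``get_client_s`` so that one instance is cached and re-used per thread,
    # e.g. (hypothetical subclass name)
    #
    #     class EchoClient(ClientAgent):
    #         pass
    #
    #     client = EchoClient.get_client_s()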
class ServerAgent(Agent):
pass
| apache-2.0 | -7,933,112,070,094,923,000 | 29.826531 | 76 | 0.671369 | false |
risbudaditya/FightFiresWithTheSmokeyBears | flask_twilio/routes.py | 1 | 2868 | import psycopg2
from googlemaps import client
import sys
key= "A GOOGLE API KEY AUTHORIZED ON SEVERAL DIRECTIONS APIS"
myClient = client.Client(key)
def getRoutes(cursor) :
valsToText = []
cursor.execute("""select * from person""")
person=cursor.fetchone()
while(person):
[minCluster,minDirections] = getClosestCluster(cursor,person)
urlForRoute = getURLForRoute(cursor, minDirections,person)
valsToText.append([person[1], urlForRoute])
person = cursor.fetchone()
return valsToText
def getClosestCluster(cursor,person) :
cursor.execute("""select * from cluster""")
cluster = cursor.fetchone()
minDistance = sys.maxsize
minCluster = None
minDirections = None
while(cluster):
directions = myClient.directions(getCoordsForGoogle(person[2],person[3]),getCoordsForGoogle(cluster[1],cluster[2]),alternatives=True)
distance = directions[0].get('legs')[0].get('duration').get('value')
if(distance<minDistance):
minDistance=distance
minCluster=cluster
minDirections = directions
cluster = cursor.fetchone()
return [minCluster, minDirections]
def getCoordsForGoogle(lat,lon):
if abs(lat) > 1000:
lat = lat/1000
if abs(lon) > 1000:
lon = lon/1000
return str(lat)+","+str(lon)
def getURLForRoute(cursor, minDirections, person):
aFireRoute = FireRoute()
aFireRoute.addLatLon(person[2],person[3])
for route in minDirections:
if not routeHasFires(cursor,route):
for step in route['legs'][0]['steps']:
aFireRoute.addLatLon(step['end_location']['lat'],step['end_location']['lng'])
return aFireRoute.getURL()
return ""
def routeHasFires(cursor, route):
cursor.execute("""select * from fire_point""")
fire_point = cursor.fetchone()
while(fire_point):
for step in route.get('legs')[0]['steps']:
endLoc = step['end_location']
if(distFrom(endLoc['lat'], endLoc['lng'],fire_point[1], fire_point[2])<1000):
return True
fire_point = cursor.fetchone()
return False
def distFrom(lat1, lon1, lat2, lon2):
from math import sin, cos, sqrt, atan2, radians
earthRadius = 6371000
dlat = radians(lat2-lat1)
dlon = radians(lon2-lon1)
a = sin(dlat/2)**2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon/2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
return earthRadius * c
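# Quick illustrative check (not part of the original module): distFrom()
# implements the haversine formula, so two points 0.01 degrees of latitude
# apart should come out near 1.11 km, e.g.
#   distFrom(45.42, -75.69, 45.43, -75.69)  # roughly 1112 metres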
class FireRoute:
#aList = []
PREFIX = "https://www.google.com/maps/dir"
DELIMITER = "/"
def __init__(self):
self.aList = []
def addLatLon(self,lat, lon):
self.aList.append(getCoordsForGoogle(lat,lon))
def getURL(self):
aURL = FireRoute.PREFIX
for loc in self.aList:
aURL+= FireRoute.DELIMITER + loc
return aURL
| mit | 5,612,678,407,702,821,000 | 31.965517 | 141 | 0.634937 | false |
kholia/Loki | src/loki_gtk.py | 1 | 90804 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# loki_gtk.py
#
# Copyright 2014 Daniel Mende <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import copy
import hashlib
import sys
import os
import platform
import threading
import time
import traceback
import signal
import string
import struct
import ConfigParser
import dpkt
import dnet
import IPy
import loki
import pygtk
pygtk.require('2.0')
import gobject
import gtk
gtk.gdk.threads_init()
DEBUG = loki.DEBUG
VERSION = loki.VERSION
PLATFORM = loki.PLATFORM
MODULE_PATH = loki.MODULE_PATH
CONFIG_PATH = loki.CONFIG_PATH
DATA_DIR = loki.DATA_DIR
class network_window(object):
BR_INT_ROW = 0
BR_MAC_ROW = 1
BR_ADR_ROW = 2
BR_NET_ROW = 3
BR_ACT_ROW = 4
BR_INT_EDIT_ROW = 5
BR_MAC_EDIT_ROW = 6
BR_ADR_EDIT_ROW = 7
BR_NET_EDIT_ROW = 8
L2_SRC_INT_ROW = 0
L2_SRC_DST_IP_ROW = 1
L2_SRC_TO_MAC_ROW = 2
L2_SRC_ACTIVE_ROW = 3
L2_DST_INT_ROW = 0
L2_DST_SRC_IP_ROW = 1
L2_DST_TO_MAC_ROW = 2
L2_DST_ACTIVE_ROW = 3
L3_SRC_INT_ROW = 0
L3_SRC_SRC_ROW = 1
L3_SRC_DST_ROW = 2
L3_SRC_PROTO_ROW = 3
L3_SRC_DPORT_ROW = 4
L3_SRC_TO_ROW = 5
L3_SRC_ACTIVE_ROW = 6
L3_DST_INT_ROW = 0
L3_DST_SRC_ROW = 1
L3_DST_DST_ROW = 2
L3_DST_PROTO_ROW = 3
L3_DST_DPORT_ROW = 4
L3_DST_TO_ROW = 5
L3_DST_ACTIVE_ROW = 6
DEBUG = False
def __init__(self, parent):
self.parent = parent
self.interfaces_liststore = gtk.ListStore(str)
self.macaddresses_liststore = gtk.ListStore(str)
self.addresses_liststore = gtk.ListStore(str)
self.netmasks_liststore = gtk.ListStore(str)
self.protocols_liststore = gtk.ListStore(str)
self.protocols_liststore.append(["tcp"])
self.protocols_liststore.append(["udp"])
self.ports_liststore = gtk.ListStore(int)
self.br_treestore = gtk.TreeStore(str, str, str, str, bool, bool, bool, bool, bool)
self.l2_src_liststore = gtk.ListStore(str, str, str, bool)
self.l2_dst_liststore = gtk.ListStore(str, str, str, bool)
self.l3_src_liststore = gtk.ListStore(str, str, str, str, int, str, bool)
self.l3_dst_liststore = gtk.ListStore(str, str, str, str, int, str, bool)
self.glade_xml = gtk.glade.XML(DATA_DIR + MODULE_PATH + "/network_config.glade")
dic = { "on_br_add_toolbutton_clicked" : self.on_br_add_toolbutton_clicked,
"on_br_del_toolbutton_clicked" : self.on_br_del_toolbutton_clicked,
"on_br_new_toolbutton_clicked" : self.on_br_new_toolbutton_clicked,
"on_br_run_toolbutton_toggled" : self.on_br_run_toolbutton_toggled,
"on_l2_src_add_toolbutton_clicked" : self.on_l2_src_add_toolbutton_clicked,
"on_l2_src_del_toolbutton_clicked" : self.on_l2_src_del_toolbutton_clicked,
"on_l2_src_new_toolbutton_clicked" : self.on_l2_src_new_toolbutton_clicked,
"on_l2_dst_add_toolbutton_clicked" : self.on_l2_dst_add_toolbutton_clicked,
"on_l2_dst_del_toolbutton_clicked" : self.on_l2_dst_del_toolbutton_clicked,
"on_l2_dst_new_toolbutton_clicked" : self.on_l2_dst_new_toolbutton_clicked,
"on_l2_run_toolbutton_toggled" : self.on_l2_run_toolbutton_toggled,
"on_l3_src_add_toolbutton_clicked" : self.on_l3_src_add_toolbutton_clicked,
"on_l3_src_del_toolbutton_clicked" : self.on_l3_src_del_toolbutton_clicked,
"on_l3_src_new_toolbutton_clicked" : self.on_l3_src_new_toolbutton_clicked,
"on_l3_dst_add_toolbutton_clicked" : self.on_l3_dst_add_toolbutton_clicked,
"on_l3_dst_del_toolbutton_clicked" : self.on_l3_dst_del_toolbutton_clicked,
"on_l3_dst_new_toolbutton_clicked" : self.on_l3_dst_new_toolbutton_clicked,
"on_l3_run_toolbutton_toggled" : self.on_l3_run_toolbutton_toggled,
"on_open_toolbutton_clicked" : self.on_open_toolbutton_clicked,
"on_save_toolbutton_clicked" : self.on_save_toolbutton_clicked,
"on_ok_button_clicked" : self.on_ok_button_clicked,
"on_cancel_button_clicked" : self.on_cancel_button_clicked,
}
self.glade_xml.signal_autoconnect(dic)
self.window = self.glade_xml.get_widget("network_window")
#~ self.window.set_parent(self.parent.window)
self.br_treeview = self.glade_xml.get_widget("br_treeview")
self.br_treeview.set_model(self.br_treestore)
self.br_treeview.set_headers_visible(True)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.interfaces_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_br_interfaces_combo_changed)
column = gtk.TreeViewColumn("Interface", renderer_combo, text=self.BR_INT_ROW, editable=self.BR_INT_EDIT_ROW)
self.br_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.macaddresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_br_macaddress_combo_changed)
column = gtk.TreeViewColumn("MAC Address", renderer_combo, text=self.BR_MAC_ROW, editable=self.BR_MAC_EDIT_ROW)
self.br_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_br_address_combo_changed)
column = gtk.TreeViewColumn("Address", renderer_combo, text=self.BR_ADR_ROW, editable=self.BR_ADR_EDIT_ROW)
self.br_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.netmasks_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_br_netmasks_combo_changed)
column = gtk.TreeViewColumn("Netmask", renderer_combo, text=self.BR_NET_ROW, editable=self.BR_NET_EDIT_ROW)
self.br_treeview.append_column(column)
renderer_toggle = gtk.CellRendererToggle()
renderer_toggle.connect("toggled", self.on_br_active_toggle_toggled)
column = gtk.TreeViewColumn("Active", renderer_toggle, active=self.BR_ACT_ROW)
self.br_treeview.append_column(column)
self.l2_src_treeview = self.glade_xml.get_widget("l2_src_treeview")
self.l2_src_treeview.set_model(self.l2_src_liststore)
self.l2_src_treeview.set_headers_visible(True)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.interfaces_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l2_src_interfaces_combo_changed)
column = gtk.TreeViewColumn("Interface", renderer_combo, text=self.L2_SRC_INT_ROW)
self.l2_src_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l2_src_address_combo_changed)
column = gtk.TreeViewColumn("Destination IP address", renderer_combo, text=self.L2_SRC_DST_IP_ROW)
self.l2_src_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.macaddresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l2_src_macaddress_combo_changed)
column = gtk.TreeViewColumn("NAT source MAC", renderer_combo, text=self.L2_SRC_TO_MAC_ROW)
self.l2_src_treeview.append_column(column)
renderer_toggle = gtk.CellRendererToggle()
renderer_toggle.connect("toggled", self.on_l2_src_active_toggle_toggled)
column = gtk.TreeViewColumn("Active", renderer_toggle, active=self.L2_SRC_ACTIVE_ROW)
self.l2_src_treeview.append_column(column)
self.l2_dst_treeview = self.glade_xml.get_widget("l2_dst_treeview")
self.l2_dst_treeview.set_model(self.l2_dst_liststore)
self.l2_dst_treeview.set_headers_visible(True)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.interfaces_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l2_dst_interfaces_combo_changed)
column = gtk.TreeViewColumn("Interface", renderer_combo, text=self.L2_DST_INT_ROW)
self.l2_dst_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l2_dst_address_combo_changed)
column = gtk.TreeViewColumn("Source IP address", renderer_combo, text=self.L2_DST_SRC_IP_ROW)
self.l2_dst_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.macaddresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l2_dst_macaddress_combo_changed)
column = gtk.TreeViewColumn("NAT destination MAC", renderer_combo, text=self.L2_DST_TO_MAC_ROW)
self.l2_dst_treeview.append_column(column)
renderer_toggle = gtk.CellRendererToggle()
renderer_toggle.connect("toggled", self.on_l2_dst_active_toggle_toggled)
column = gtk.TreeViewColumn("Active", renderer_toggle, active=self.L2_DST_ACTIVE_ROW)
self.l2_dst_treeview.append_column(column)
self.l3_src_treeview = self.glade_xml.get_widget("l3_src_treeview")
self.l3_src_treeview.set_model(self.l3_src_liststore)
self.l3_src_treeview.set_headers_visible(True)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.interfaces_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_src_interfaces_combo_changed)
column = gtk.TreeViewColumn("Interface", renderer_combo, text=self.L3_SRC_INT_ROW)
self.l3_src_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_src_saddress_combo_changed)
column = gtk.TreeViewColumn("Source IP address", renderer_combo, text=self.L3_SRC_SRC_ROW)
self.l3_src_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_src_daddress_combo_changed)
column = gtk.TreeViewColumn("Destination IP address", renderer_combo, text=self.L3_SRC_DST_ROW)
self.l3_src_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.protocols_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_src_proto_combo_changed)
column = gtk.TreeViewColumn("Protocol", renderer_combo, text=self.L3_SRC_PROTO_ROW)
self.l3_src_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.ports_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_src_dport_combo_changed)
column = gtk.TreeViewColumn("Destination port", renderer_combo, text=self.L3_SRC_DPORT_ROW)
self.l3_src_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_src_toaddress_combo_changed)
column = gtk.TreeViewColumn("To IP address", renderer_combo, text=self.L3_SRC_TO_ROW)
self.l3_src_treeview.append_column(column)
renderer_toggle = gtk.CellRendererToggle()
renderer_toggle.connect("toggled", self.on_l3_src_active_toggle_toggled)
column = gtk.TreeViewColumn("Active", renderer_toggle, active=self.L3_SRC_ACTIVE_ROW)
self.l3_src_treeview.append_column(column)
self.l3_dst_treeview = self.glade_xml.get_widget("l3_dst_treeview")
self.l3_dst_treeview.set_model(self.l3_dst_liststore)
self.l3_dst_treeview.set_headers_visible(True)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.interfaces_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_dst_interfaces_combo_changed)
column = gtk.TreeViewColumn("Interface", renderer_combo, text=self.L3_DST_INT_ROW)
self.l3_dst_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_dst_saddress_combo_changed)
column = gtk.TreeViewColumn("Source IP address", renderer_combo, text=self.L3_DST_SRC_ROW)
self.l3_dst_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_dst_daddress_combo_changed)
column = gtk.TreeViewColumn("Destination IP address", renderer_combo, text=self.L3_DST_DST_ROW)
self.l3_dst_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.protocols_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_dst_proto_combo_changed)
column = gtk.TreeViewColumn("Protocol", renderer_combo, text=self.L3_DST_PROTO_ROW)
self.l3_dst_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.ports_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_dst_dport_combo_changed)
column = gtk.TreeViewColumn("Destination port", renderer_combo, text=self.L3_DST_DPORT_ROW)
self.l3_dst_treeview.append_column(column)
renderer_combo = gtk.CellRendererCombo()
renderer_combo.set_property("editable", True)
renderer_combo.set_property("model", self.addresses_liststore)
renderer_combo.set_property("text-column", 0)
renderer_combo.set_property("has-entry", True)
renderer_combo.connect("edited", self.on_l3_dst_toaddress_combo_changed)
column = gtk.TreeViewColumn("To IP address", renderer_combo, text=self.L3_DST_TO_ROW)
self.l3_dst_treeview.append_column(column)
renderer_toggle = gtk.CellRendererToggle()
renderer_toggle.connect("toggled", self.on_l3_dst_active_toggle_toggled)
column = gtk.TreeViewColumn("Active", renderer_toggle, active=self.L3_DST_ACTIVE_ROW)
self.l3_dst_treeview.append_column(column)
self.filechooser = gtk.FileChooserDialog("Open..",
None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
self.filechooser.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
self.filechooser.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("Config file")
filter.add_pattern("*.cfg")
self.filechooser.add_filter(filter)
self.devices = self.get_devices()
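    # Cell-renderer callbacks: each "edited"/"toggled" handler below validates the new
    # value where needed and writes it back into the corresponding list/tree store.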
def on_br_interfaces_combo_changed(self, widget, path, text):
self.br_treestore[path][self.BR_INT_ROW] = text
self.add_interfaces_store(text)
try:
mac = dnet.eth_ntoa(self.devices[text]["mac"])
self.br_treestore[path][self.BR_MAC_ROW] = mac
except:
pass
def on_br_macaddress_combo_changed(self, widget, path, text):
if self.check_macaddress(text):
self.br_treestore[path][self.BR_MAC_ROW] = text
self.add_macaddresses_store(text)
def on_br_address_combo_changed(self, widget, path, text):
if self.check_address(text):
self.br_treestore[path][self.BR_ADR_ROW] = text
self.add_addresses_store(text)
def on_br_netmasks_combo_changed(self, widget, path, text):
if self.check_address(text):
self.br_treestore[path][self.BR_NET_ROW] = text
self.add_netmasks_store(text)
def on_br_active_toggle_toggled(self, widget, path):
self.br_treestore[path][self.BR_ACT_ROW] = not self.br_treestore[path][self.BR_ACT_ROW]
def on_l2_src_interfaces_combo_changed(self, widget, path, text):
self.l2_src_liststore[path][self.L2_SRC_INT_ROW] = text
self.add_interfaces_store(text)
def on_l2_src_address_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l2_src_liststore[path][self.L2_SRC_DST_IP_ROW] = text
self.add_addresses_store(text)
def on_l2_src_macaddress_combo_changed(self, widget, path, text):
if self.check_macaddress(text):
            self.l2_src_liststore[path][self.L2_SRC_TO_MAC_ROW] = text
self.add_macaddresses_store(text)
def on_l2_src_active_toggle_toggled(self, widget, path):
self.l2_src_liststore[path][self.L2_SRC_ACTIVE_ROW] = not self.l2_src_liststore[path][self.L2_SRC_ACTIVE_ROW]
def on_l2_dst_interfaces_combo_changed(self, widget, path, text):
self.l2_dst_liststore[path][self.L2_DST_INT_ROW] = text
self.add_interfaces_store(text)
def on_l2_dst_address_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l2_dst_liststore[path][self.L2_DST_SRC_IP_ROW] = text
self.add_addresses_store(text)
def on_l2_dst_macaddress_combo_changed(self, widget, path, text):
if self.check_macaddress(text):
self.l2_dst_liststore[path][self.L2_DST_TO_MAC_ROW] = text
self.add_macaddresses_store(text)
def on_l2_dst_active_toggle_toggled(self, widget, path):
self.l2_dst_liststore[path][self.L2_DST_ACTIVE_ROW] = not self.l2_dst_liststore[path][self.L2_DST_ACTIVE_ROW]
def on_l3_src_interfaces_combo_changed(self, widget, path, text):
self.l3_src_liststore[path][self.L3_SRC_INT_ROW] = text
self.add_interfaces_store(text)
def on_l3_src_saddress_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l3_src_liststore[path][self.L3_SRC_SRC_ROW] = text
self.add_addresses_store(text)
def on_l3_src_daddress_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l3_src_liststore[path][self.L3_SRC_DST_ROW] = text
self.add_addresses_store(text)
def on_l3_src_proto_combo_changed(self, widget, path, text):
self.l3_src_liststore[path][self.L3_SRC_PROTO_ROW] = text
self.add_protocols_store(text)
def on_l3_src_dport_combo_changed(self, widget, path, text):
if self.check_port(text):
port = int(text)
self.l3_src_liststore[path][self.L3_SRC_DPORT_ROW] = port
self.add_ports_store(port)
def on_l3_src_toaddress_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l3_src_liststore[path][self.L3_SRC_TO_ROW] = text
self.add_addresses_store(text)
def on_l3_src_active_toggle_toggled(self, widget, path):
self.l3_src_liststore[path][self.L3_SRC_ACTIVE_ROW] = not self.l3_src_liststore[path][self.L3_SRC_ACTIVE_ROW]
def on_l3_dst_interfaces_combo_changed(self, widget, path, text):
self.l3_dst_liststore[path][self.L3_DST_INT_ROW] = text
self.add_interfaces_store(text)
def on_l3_dst_saddress_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l3_dst_liststore[path][self.L3_DST_SRC_ROW] = text
self.add_addresses_store(text)
def on_l3_dst_daddress_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l3_dst_liststore[path][self.L3_DST_DST_ROW] = text
self.add_addresses_store(text)
def on_l3_dst_proto_combo_changed(self, widget, path, text):
self.l3_dst_liststore[path][self.L3_DST_PROTO_ROW] = text
self.add_protocols_store(text)
def on_l3_dst_dport_combo_changed(self, widget, path, text):
if self.check_port(text):
port = int(text)
self.l3_dst_liststore[path][self.L3_DST_DPORT_ROW] = port
self.add_ports_store(port)
def on_l3_dst_toaddress_combo_changed(self, widget, path, text):
if self.check_address(text):
self.l3_dst_liststore[path][self.L3_DST_TO_ROW] = text
self.add_addresses_store(text)
def on_l3_dst_active_toggle_toggled(self, widget, path):
self.l3_dst_liststore[path][self.L3_DST_ACTIVE_ROW] = not self.l3_dst_liststore[path][self.L3_DST_ACTIVE_ROW]
def add_interfaces_store(self, interf):
for row in self.interfaces_liststore:
if row[0] == interf:
return
self.interfaces_liststore.append([interf])
def add_macaddresses_store(self, addr):
for row in self.macaddresses_liststore:
if row[0] == addr:
return
self.macaddresses_liststore.append([addr])
def add_addresses_store(self, addr):
for row in self.addresses_liststore:
if row[0] == addr:
return
self.addresses_liststore.append([addr])
def add_netmasks_store(self, mask):
for row in self.netmasks_liststore:
if row[0] == mask:
return
self.netmasks_liststore.append([mask])
def add_protocols_store(self, proto):
for row in self.protocols_liststore:
if row[0] == proto:
return
        self.protocols_liststore.append([proto])
def add_ports_store(self, port):
for row in self.ports_liststore:
if row[0] == port:
return
self.ports_liststore.append([port])
def check_macaddress(self, address):
try:
dnet.eth_aton(address)
except:
self.msg("MAC Address invalid")
return False
return True
def check_address(self, address):
try:
dnet.ip_aton(address)
except:
try:
dnet.ip6_aton(address)
except:
self.msg("Address invalid")
return False
return True
def check_port(self, port):
if type(port) == str:
try:
port = int(port)
except:
self.msg("Port invalid, no int")
return False
if port < 0 or port > 65535:
self.msg("Port invalid, out of range")
return False
return True
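    # Toolbar handlers: add, delete or clear rows in the bridge/NAT models, toggle
    # rule execution, and load/save configurations.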
def on_br_add_toolbutton_clicked(self, btn):
select = self.br_treeview.get_selection()
(model, paths) = select.get_selected_rows()
if len(paths) > 0:
for i in paths:
dev = self.select_device()
if not dev is None:
self.br_treestore.append(model.get_iter(i), [dev, dnet.eth_ntoa(self.devices[dev]["mac"]), "", "", True, True, False, False, False])
else:
self.br_treestore.append(None, ["br23", "00:01:02:03:04:05", "0.0.0.0", "0.0.0.0", True, True, True, True, True])
def on_br_del_toolbutton_clicked(self, btn):
select = self.br_treeview.get_selection()
(model, paths) = select.get_selected_rows()
if len(paths) > 0:
for i in paths:
del self.br_treestore[i]
def on_br_new_toolbutton_clicked(self, btn):
#~ self.interfaces_liststore.clear()
#~ self.macaddresses_liststore.clear()
#~ self.addresses_liststore.clear()
#~ self.netmasks_liststore.clear()
self.br_treestore.clear()
#~ self.devices = self.get_devices()
def on_br_run_toolbutton_toggled(self, btn):
if btn.get_active():
self.execute_br()
else:
self.unexecute_br()
def on_l2_src_add_toolbutton_clicked(self, btn):
self.l2_src_liststore.append(["br23", "0.0.0.0", "00:01:02:03:04:05", True])
def on_l2_src_del_toolbutton_clicked(self, btn):
select = self.l2_src_treeview.get_selection()
(model, paths) = select.get_selected_rows()
if len(paths) > 0:
for i in paths:
del self.l2_src_liststore[i]
def on_l2_src_new_toolbutton_clicked(self, btn):
self.l2_src_liststore.clear()
def on_l2_dst_add_toolbutton_clicked(self, btn):
self.l2_dst_liststore.append(["br23", "0.0.0.0", "00:01:02:03:04:05", True])
def on_l2_dst_del_toolbutton_clicked(self, btn):
select = self.l2_dst_treeview.get_selection()
(model, paths) = select.get_selected_rows()
if len(paths) > 0:
for i in paths:
del self.l2_dst_liststore[i]
def on_l2_dst_new_toolbutton_clicked(self, btn):
self.l2_dst_liststore.clear()
def on_l2_run_toolbutton_toggled(self, btn):
if btn.get_active():
self.execute_l2()
else:
self.unexecute_l2()
def on_l3_src_add_toolbutton_clicked(self, btn):
self.l3_src_liststore.append(["br23", "0.0.0.0", "0.0.0.0", "tcp", 80, "0.0.0.0", True])
def on_l3_src_del_toolbutton_clicked(self, btn):
select = self.l3_src_treeview.get_selection()
(model, paths) = select.get_selected_rows()
if len(paths) > 0:
for i in paths:
del self.l3_src_liststore[i]
def on_l3_src_new_toolbutton_clicked(self, btn):
self.l3_src_liststore.clear()
def on_l3_dst_add_toolbutton_clicked(self, btn):
self.l3_dst_liststore.append(["br23", "0.0.0.0", "0.0.0.0", "tcp", 80, "0.0.0.0", True])
def on_l3_dst_del_toolbutton_clicked(self, btn):
select = self.l3_dst_treeview.get_selection()
(model, paths) = select.get_selected_rows()
if len(paths) > 0:
for i in paths:
del self.l3_dst_liststore[i]
def on_l3_dst_new_toolbutton_clicked(self, btn):
self.l3_dst_liststore.clear()
def on_l3_run_toolbutton_toggled(self, btn):
if btn.get_active():
self.execute_l3()
else:
self.unexecute_l3()
def on_open_toolbutton_clicked(self, btn):
response = self.filechooser.run()
if response == gtk.RESPONSE_OK:
self.load_config(self.filechooser.get_filename())
self.filechooser.hide()
def on_save_toolbutton_clicked(self, btn):
response = self.filechooser.run()
if response == gtk.RESPONSE_OK:
self.save_config(self.filechooser.get_filename())
self.filechooser.hide()
def on_ok_button_clicked(self, btn):
self.save_config(CONFIG_PATH + "/network.cfg")
self.execute_br()
self.execute_l2()
self.execute_l3()
self.parent.netcfg_configured = True
self.parent.select_interface()
self.window.hide()
def on_cancel_button_clicked(self, btn):
self.window.hide()
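    # read_*_config() collect the active rows from the GTK models into plain dicts
    # that the save and execute methods consume.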
def read_br_config(self):
config = []
i = self.br_treestore.get_iter_root()
while i != None:
(act, dev, mac, ip, mask) = self.br_treestore.get(i, self.BR_ACT_ROW, self.BR_INT_ROW, self.BR_MAC_ROW, self.BR_ADR_ROW, self.BR_NET_ROW)
if not act:
i = self.br_treestore.iter_next(i)
continue
if dev in self.devices:
self.msg("Device '%s' already exists" % dev)
i = self.br_treestore.iter_next(i)
continue
if not self.br_treestore.iter_has_child(i):
self.msg("No Interfaces added to bridge")
i = self.br_treestore.iter_next(i)
continue
ifs = []
j = self.br_treestore.iter_children(i)
while j != None:
(interf, active) = self.br_treestore.get(j, self.BR_INT_ROW, self.BR_ACT_ROW)
if interf not in self.devices:
self.msg("Interface not found")
j = self.br_treestore.iter_next(j)
continue
ifs.append({ "dev" : interf,
"act" : active })
j = self.br_treestore.iter_next(j)
i = self.br_treestore.iter_next(i)
config.append({ "act" : act,
"dev" : dev,
"mac" : mac,
"ip" : ip,
"mask": mask,
"ifs" : ifs })
return config
def read_l2_config(self):
src = [ { "act" : i[self.L2_SRC_ACTIVE_ROW],
"dev" : i[self.L2_SRC_INT_ROW],
"ip" : i[self.L2_SRC_DST_IP_ROW],
"mac" : i[self.L2_SRC_TO_MAC_ROW]
} for i in self.l2_src_liststore ]
dst = [ { "act" : i[self.L2_DST_ACTIVE_ROW],
"dev" : i[self.L2_DST_INT_ROW],
"ip" : i[self.L2_DST_SRC_IP_ROW],
"mac" : i[self.L2_DST_TO_MAC_ROW]
} for i in self.l2_dst_liststore ]
return { "src" : src, "dst" : dst }
def read_l3_config(self):
src = [ { "act" : i[self.L3_SRC_ACTIVE_ROW],
"dev" : i[self.L3_SRC_INT_ROW],
"src" : i[self.L3_SRC_SRC_ROW],
"dst" : i[self.L3_SRC_DST_ROW],
"proto": i[self.L3_SRC_PROTO_ROW],
"dport": i[self.L3_SRC_DPORT_ROW],
"to" : i[self.L3_SRC_TO_ROW]
} for i in self.l3_src_liststore ]
dst = [ { "act" : i[self.L3_DST_ACTIVE_ROW],
"dev" : i[self.L3_DST_INT_ROW],
"src" : i[self.L3_DST_SRC_ROW],
"dst" : i[self.L3_DST_DST_ROW],
"proto": i[self.L3_DST_PROTO_ROW],
"dport": i[self.L3_DST_DPORT_ROW],
"to" : i[self.L3_DST_TO_ROW]
} for i in self.l3_dst_liststore ]
return { "src" : src, "dst" : dst }
def load_config(self, filename):
parser = ConfigParser.RawConfigParser()
try:
parser.read(filename)
except Exception, e:
self.log("Can't read config: %s" %e)
return
br_config = []
l2_config = { "src" : [], "dst" : []}
l3_config = { "src" : [], "dst" : []}
for i in parser.sections():
if i.startswith("br_"):
br_config.append({ "act" : parser.getboolean(i, "active"),
"dev" : parser.get(i, "device"),
"mac" : parser.get(i, "mac"),
"ip" : parser.get(i, "address"),
"mask": parser.get(i, "netmask"),
"ifs" : [ { "dev" : j.split("_")[1],
"act" : parser.get(i, j) }
for j in parser.options(i) if j.startswith("if_") ]
})
elif i.startswith("l2_src"):
l2_config["src"].append( { "act" : parser.getboolean(i, "active"),
"dev" : parser.get(i, "device"),
"ip" : parser.get(i, "address"),
"mac" : parser.get(i, "mac") })
elif i.startswith("l2_dst_"):
l2_config["dst"].append( { "act" : parser.getboolean(i, "active"),
"dev" : parser.get(i, "device"),
"ip" : parser.get(i, "address"),
"mac" : parser.get(i, "mac") })
elif i.startswith("l3_src"):
l3_config["src"].append( { "act" : parser.getboolean(i, "active"),
"dev" : parser.get(i, "device"),
"src" : parser.get(i, "source"),
"dst" : parser.get(i, "destination"),
"proto": parser.get(i, "protocol"),
"dport": parser.getint(i, "dport"),
"to" : parser.get(i, "to") })
elif i.startswith("l3_dst"):
l3_config["dst"].append( { "act" : parser.getboolean(i, "active"),
"dev" : parser.get(i, "device"),
"src" : parser.get(i, "source"),
"dst" : parser.get(i, "destination"),
"proto": parser.get(i, "protocol"),
"dport": parser.getint(i, "dport"),
"to" : parser.get(i, "to") })
self.set_br_config(br_config)
self.set_l2_config(l2_config)
self.set_l3_config(l3_config)
def set_br_config(self, config):
for i in config:
br = self.br_treestore.append(None, [i["dev"], i["mac"], i["ip"], i["mask"], i["act"], True, True, True, True])
for j in i["ifs"]:
self.br_treestore.append(br, [j["dev"], "", "", "", j["act"], True, False, False, False])
def set_l2_config(self, config):
for i in config["src"]:
self.l2_src_liststore.append([i["dev"], i["ip"], i["mac"], i["act"]])
for i in config["dst"]:
self.l2_dst_liststore.append([i["dev"], i["ip"], i["mac"], i["act"]])
def set_l3_config(self, config):
for i in config["src"]:
self.l3_src_liststore.append([i["dev"], i["src"], i["dst"], i["proto"], i["dport"], i["to"], i["act"]])
for i in config["dst"]:
self.l3_dst_liststore.append([i["dev"], i["src"], i["dst"], i["proto"], i["dport"], i["to"], i["act"]])
def save_config(self, filename):
parser = ConfigParser.RawConfigParser()
br_config = self.read_br_config()
for i in br_config:
parser.add_section("br_%s" % i["dev"])
parser.set("br_%s" % i["dev"], "active", i["act"])
parser.set("br_%s" % i["dev"], "device", i["dev"])
parser.set("br_%s" % i["dev"], "mac", i["mac"])
parser.set("br_%s" % i["dev"], "address", i["ip"])
parser.set("br_%s" % i["dev"], "netmask", i["mask"])
for j in i["ifs"]:
parser.set("br_%s" % i["dev"], "if_%s" % j["dev"], j["act"])
l2_config = self.read_l2_config()
for i in l2_config["src"]:
m = hashlib.sha256()
m.update(i["dev"] + i["ip"] + i["mac"])
uid = m.hexdigest()
parser.add_section("l2_src_%s" % uid)
parser.set("l2_src_%s" % uid, "active", i["act"])
parser.set("l2_src_%s" % uid, "device", i["dev"])
parser.set("l2_src_%s" % uid, "address", i["ip"])
parser.set("l2_src_%s" % uid, "mac", i["mac"])
for i in l2_config["dst"]:
m = hashlib.sha256()
m.update(i["dev"] + i["ip"] + i["mac"])
uid = m.hexdigest()
parser.add_section("l2_dst_%s" % uid)
parser.set("l2_dst_%s" % uid, "active", i["act"])
parser.set("l2_dst_%s" % uid, "device", i["dev"])
parser.set("l2_dst_%s" % uid, "address", i["ip"])
parser.set("l2_dst_%s" % uid, "mac", i["mac"])
l3_config = self.read_l3_config()
for i in l3_config["src"]:
m = hashlib.sha256()
m.update(i["dev"] + i["src"] + i["dst"] + i["proto"] + "%d" % i["dport"])
uid = m.hexdigest()
parser.add_section("l3_src_%s" % uid)
parser.set("l3_src_%s" % uid, "active", i["act"])
parser.set("l3_src_%s" % uid, "device", i["dev"])
parser.set("l3_src_%s" % uid, "source", i["src"])
parser.set("l3_src_%s" % uid, "destination", i["dst"])
parser.set("l3_src_%s" % uid, "protocol", i["proto"])
parser.set("l3_src_%s" % uid, "dport", i["dport"])
parser.set("l3_src_%s" % uid, "to", i["to"])
for i in l3_config["dst"]:
m = hashlib.sha256()
m.update(i["dev"] + i["src"] + i["dst"] + i["proto"] + "%d" % i["dport"])
uid = m.hexdigest()
parser.add_section("l3_dst_%s" % uid)
parser.set("l3_dst_%s" % uid, "active", i["act"])
parser.set("l3_dst_%s" % uid, "device", i["dev"])
parser.set("l3_dst_%s" % uid, "source", i["src"])
parser.set("l3_dst_%s" % uid, "destination", i["dst"])
parser.set("l3_dst_%s" % uid, "protocol", i["proto"])
parser.set("l3_dst_%s" % uid, "dport", i["dport"])
parser.set("l3_dst_%s" % uid, "to", i["to"])
        if os.path.exists(filename):
            # TODO: ask the user for confirmation before overwriting an existing file
            pass
with open(filename, 'wb') as configfile:
parser.write(configfile)
self.log("Saved bridge configuration to '%s'" % filename)
def execute_br(self):
br_config = self.read_br_config()
for i in br_config:
if i["act"]:
dev = i["dev"]
self.log("Creating bridge interface '%s'" % dev)
cmd = "brctl addbr %s" % dev
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
for j in i["ifs"]:
if j["act"]:
d = j["dev"]
self.log("Adding interface '%s' to bridge '%s'" % (d, dev))
cmd = "ip link set dev %s up" % d
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
cmd = "brctl addif %s %s" % (dev, d)
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
mac = i["mac"]
self.log("Setting MAC address '%s' on bridge '%s'" % (mac, dev))
cmd = "ip link set dev %s address %s" % (dev, mac)
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
ip = i["ip"]
mask = len(IPy.IP(i["mask"]).strBin().replace("0", ""))
self.log("Setting IP address '%s' on bridge '%s'" % (ip, dev))
cmd = "ip addr add %s/%d dev %s" % (ip, mask, dev)
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
self.log("Setting IPv6 link local address 'fe80::%s' on bridge '%s'" % (mac, dev))
cmd = "ip addr add fe80::%s/64 dev %s" % (mac, dev)
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
self.log("Setting link on bridge '%s' up" % dev)
cmd = "ip link set dev %s up" % dev
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
def unexecute_br(self):
br_config = self.read_br_config()
for i in br_config:
if i["act"]:
dev = i["dev"]
self.log("Setting link on bridge '%s' down" % dev)
cmd = "ip link set dev %s down" % dev
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
ip = i["ip"]
mask = len(IPy.IP(i["mask"]).strBin().replace("0", ""))
self.log("Removing IP address '%s' from bridge '%s'" % (ip, dev))
cmd = "ip addr del %s/%d dev %s" % (ip, mask, dev)
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
for j in i["ifs"]:
if j["act"]:
d = j["dev"]
self.log("Removing interface '%s' from bridge '%s'" % (d, dev))
cmd = "brctl delif %s %s" % (dev, d)
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
self.log("Removing bridge interface '%s'" % dev)
cmd = "brctl delbr %s" % dev
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
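    # Layer-2 NAT: MAC addresses are rewritten with ebtables rules in the nat table.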
def execute_l2(self):
l2_config = self.read_l2_config()
for i in l2_config["src"]:
self.log("Activating L2 Source NAT on '%s'" % i["dev"])
cmd = "ebtables -t nat -A PREROUTING -p ip --ip-destination %s --in-interface %s -j snat --to-source %s" % (i["ip"], i["dev"], i["mac"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
for i in l2_config["dst"]:
self.log("Activating L2 Destination NAT on '%s'" % i["dev"])
cmd = "ebtables -t nat -A PREROUTING -p ip --ip-source %s --in-interface %s -j dnat --to-destination %s" % (i["ip"], i["dev"], i["mac"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
def unexecute_l2(self):
l2_config = self.read_l2_config()
for i in l2_config["src"]:
self.log("Deleting L2 Source NAT on '%s'" % i["dev"])
cmd = "ebtables -t nat -D PREROUTING -p ip --ip-destination %s --in-interface %s -j snat --to-source %s" % (i["ip"], i["dev"], i["mac"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
for i in l2_config["dst"]:
self.log("Deleting L2 Destination NAT on '%s'" % i["dev"])
cmd = "ebtables -t nat -D PREROUTING -p ip --ip-source %s --in-interface %s -j dnat --to-destination %s" % (i["ip"], i["dev"], i["mac"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
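    # Layer-3 NAT: iptables nat rules - SNAT in POSTROUTING for source NAT,
    # DNAT in PREROUTING for destination NAT.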
def execute_l3(self):
l3_config = self.read_l3_config()
for i in l3_config["src"]:
self.log("Activating L3 Source NAT on '%s'" % i["dev"])
cmd = "iptables -t nat -A POSTROUTING -p %s --dport %d -o %s -jSNAT --to %s --destination %s --source %s" % (i["proto"], i["dport"], i["dev"], i["to"], i["dst"], i["src"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
for i in l3_config["src"]:
self.log("Activating L3 Destination NAT on '%s'" % i["dev"])
cmd = "iptables -t nat -A PREROUTING -p %s --dport %d -i %s -jSNAT --to %s --destination %s --source %s" % (i["proto"], i["dport"], i["dev"], i["to"], i["dst"], i["src"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
def unexecute_l3(self):
l3_config = self.read_l3_config()
for i in l3_config["src"]:
self.log("Deleting L3 Source NAT on '%s'" % i["dev"])
cmd = "iptables -t nat -D POSTROUTING -p %s --dport %d -o %s -jSNAT --to %s --destination %s --source %s" % (i["proto"], i["dport"], i["dev"], i["to"], i["dst"], i["src"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
for i in l3_config["src"]:
self.log("Deleting L3 Destination NAT on '%s'" % i["dev"])
cmd = "iptables -t nat -D PREROUTING -p %s --dport %d -i %s -jSNAT --to %s --destination %s --source %s" % (i["proto"], i["dport"], i["dev"], i["to"], i["dst"], i["src"])
if not self.DEBUG:
os.system(cmd)
else:
self.log(cmd)
def select_device(self):
def on_network_combobox_changed(box, label):
if PLATFORM == "Windows":
descr = box.get_active_text()
dev = None
for i in self.devices:
if self.devices[i]['descr'] == descr:
dev = i
assert(dev != None)
else:
dev = box.get_active_text()
str = ""
if dev:
if len(self.devices[dev]['ip4']) > 0:
str += "\nIPv4:"
for i in self.devices[dev]['ip4']:
str += "\n\t%s\n\t\t%s" % (i['ip'], i['mask'])
else:
str += "\nNo IPv4 Address"
if len(self.devices[dev]['ip6']) > 0:
str += "\nIPv6:"
for i in self.devices[dev]['ip6']:
str += "\n\t%s\n\t\t%s" % (i['ip'], i['mask'])
else:
str += "\nNo IPv6 Address"
label.set_text(str)
dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, "Select the interface to use")
box = gtk.combo_box_new_text()
for dev in self.devices:
if PLATFORM == "Windows":
box.append_text(self.devices[dev]['descr'])
else:
box.append_text(dev)
dialog.vbox.pack_start(box)
label = gtk.Label()
dialog.vbox.pack_start(label)
box.connect('changed', on_network_combobox_changed, label)
dialog.vbox.show_all()
box.set_active(0)
ret = dialog.run()
dialog.destroy()
interface = None
if ret == gtk.RESPONSE_OK:
if PLATFORM == "Windows":
descr = box.get_active_text()
for i in self.devices:
if self.devices[i]['descr'] == descr:
interface = i
assert(interface != None)
else:
interface = box.get_active_text()
return interface
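    # Enumerate capture devices via pcap/dnet and pre-populate the interface, MAC,
    # address and netmask completion stores.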
def get_devices(self):
devices = {}
devs = loki.pcap.findalldevs()
for (name, descr, addr, flags) in devs:
try:
test = dnet.eth(name)
mac = test.get()
devices[name] = { 'mac' : mac, 'ip4' : [], 'ip6' : [], 'descr' : descr, 'flags' : flags }
except:
pass
else:
self.add_interfaces_store(name)
self.add_macaddresses_store(dnet.eth_ntoa(mac))
if len(addr) > 1:
for (ip, mask, net, gw) in addr:
try:
dnet.ip_aton(ip)
addr_dict = {}
addr_dict['ip'] = ip
self.add_addresses_store(ip)
addr_dict['mask'] = mask
self.add_netmasks_store(mask)
addr_dict['net'] = net
addr_dict['gw'] = gw
devices[name]['ip4'].append(addr_dict)
except:
pass
try:
dnet.ip6_aton(ip)
addr_dict = {}
addr_dict['ip'] = ip
self.add_addresses_store(ip)
addr_dict['mask'] = mask
self.add_netmasks_store(mask)
addr_dict['net'] = net
addr_dict['gw'] = gw
if ip.startswith("fe80:"):
addr_dict['linklocal'] = True
else:
addr_dict['linklocal'] = False
devices[name]['ip6'].append(addr_dict)
except:
pass
return devices
def msg(self, msg):
print msg
def log(self, msg):
print msg
class about_window(gtk.Window):
def __init__(self, parent):
gtk.Window.__init__(self)
self.set_title("About")
self.set_default_size(150, 70)
self.set_property("modal", True)
label = gtk.Label("This is %s version %s by Daniel Mende - [email protected]\nRunning on %s" % (parent.__class__.__name__, VERSION, PLATFORM))
button = gtk.Button(gtk.STOCK_CLOSE)
button.set_use_stock(True)
button.connect_object("clicked", gtk.Widget.destroy, self)
buttonbox = gtk.HButtonBox()
buttonbox.pack_start(button)
vbox = gtk.VBox()
vbox.pack_start(label, True, True, 0)
vbox.pack_start(buttonbox, False, False, 0)
self.add(vbox)
class log_window(gtk.Window):
def __init__(self, textbuffer):
gtk.Window.__init__(self)
self.set_title("Log")
self.set_default_size(300, 400)
textview = gtk.TextView(textbuffer)
textview.set_editable(False)
button = gtk.Button(gtk.STOCK_CLOSE)
button.set_use_stock(True)
button.connect_object("clicked", gtk.Widget.destroy, self)
buttonbox = gtk.HButtonBox()
buttonbox.pack_start(button)
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.add(textview)
scrolledwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
vbox = gtk.VBox()
vbox.pack_start(scrolledwindow, True, True, 0)
vbox.pack_start(buttonbox, False, False, 0)
self.add(vbox)
class module_preferences_window(gtk.Window):
NAME_ROW = 0
VALUE_ROW = 1
TYPE_ROW = 2
MIN_ROW = 3
MAX_ROW = 4
TOOLTIP_ROW = 5
def __init__(self, parent, mod_name, dict):
self.par = parent
self.mod_name = mod_name
self.dict = dict
gtk.Window.__init__(self)
self.set_title("%s Preferences" % mod_name.upper())
self.set_default_size(250, 350)
self.module_liststore = gtk.ListStore(str, str, str, int, int, str)
notebook = gtk.Notebook()
module_treeview = gtk.TreeView()
module_treeview.set_model(self.module_liststore)
module_treeview.set_headers_visible(True)
module_treeview.set_tooltip_column(self.TOOLTIP_ROW)
column = gtk.TreeViewColumn()
column.set_title("Name")
render_text = gtk.CellRendererText()
column.pack_start(render_text, expand=True)
column.add_attribute(render_text, 'text', self.NAME_ROW)
module_treeview.append_column(column)
column = gtk.TreeViewColumn()
column.set_title("Value")
render_text = gtk.CellRendererText()
render_text.set_property('editable', True)
render_text.connect('edited', self.edited_callback, self.module_liststore)
column.pack_start(render_text, expand=True)
column.add_attribute(render_text, 'text', self.VALUE_ROW)
module_treeview.append_column(column)
column = gtk.TreeViewColumn()
column.set_title("Type")
render_text = gtk.CellRendererText()
column.pack_start(render_text, expand=True)
column.add_attribute(render_text, 'text', self.TYPE_ROW)
module_treeview.append_column(column)
#~ column = gtk.TreeViewColumn()
#~ column.set_title("Min")
#~ render_text = gtk.CellRendererText()
#~ column.pack_start(render_text, expand=True)
#~ column.add_attribute(render_text, 'text', self.MIN_ROW)
#~ module_treeview.append_column(column)
#~ column = gtk.TreeViewColumn()
#~ column.set_title("Max")
#~ render_text = gtk.CellRendererText()
#~ column.pack_start(render_text, expand=True)
#~ column.add_attribute(render_text, 'text', self.MAX_ROW)
#~ module_treeview.append_column(column)
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_property("vscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.set_property("hscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.add_with_viewport(module_treeview)
vbox = gtk.VBox(False, 0)
vbox.pack_start(scrolledwindow, True, True, 0)
buttonbox = gtk.HButtonBox()
close = gtk.Button(gtk.STOCK_CLOSE)
close.set_use_stock(True)
close.connect_object("clicked", self.close_button_clicked, None)
buttonbox.pack_start(close)
save = gtk.Button(gtk.STOCK_SAVE)
save.set_use_stock(True)
save.connect_object("clicked", self.save_button_clicked, None)
buttonbox.pack_start(save)
apply = gtk.Button(gtk.STOCK_APPLY)
apply.set_use_stock(True)
apply.connect_object("clicked", self.apply_button_clicked, None)
buttonbox.pack_start(apply)
vbox.pack_start(buttonbox, False, False, 0)
self.add(vbox)
for name in dict:
self.module_liststore.append([name, str(dict[name]["value"]), dict[name]["type"], dict[name]["min"], dict[name]["max"], "Min: %s Max: %s" % (dict[name]["min"],dict[name]["max"] )])
def edited_callback(self, cell, path, new_text, model):
def int_(self, cell, path, new_text, model):
try:
val = int(new_text)
assert(val >= model[path][self.MIN_ROW])
assert(val <= model[path][self.MAX_ROW])
except:
pass
else:
model[path][self.VALUE_ROW] = new_text
self.dict[model[path][self.NAME_ROW]]["value"] = val
def str_(self, cell, path, new_text, model):
try:
assert(len(new_text) >= model[path][self.MIN_ROW])
assert(len(new_text) <= model[path][self.MAX_ROW])
except:
pass
else:
model[path][self.VALUE_ROW] = new_text
self.dict[model[path][self.NAME_ROW]]["value"] = new_text
def float_(self, cell, path, new_text, model):
try:
val = float(new_text)
assert(val >= model[path][self.MIN_ROW])
assert(val <= model[path][self.MAX_ROW])
except:
pass
else:
model[path][self.VALUE_ROW] = new_text
self.dict[model[path][self.NAME_ROW]]["value"] = val
{ "str" : str_,
"int" : int_,
"float" : float_ }[model[path][self.TYPE_ROW]](self, cell, path, new_text, model)
def close_button_clicked(self, btn):
gtk.Widget.destroy(self)
def save_button_clicked(self, btn):
self.apply_button_clicked(None)
config = ConfigParser.RawConfigParser()
config.add_section(self.mod_name)
for i in self.dict:
if self.dict[i]["type"] == "str":
config.set(self.mod_name, i, base64.b64encode(self.dict[i]["value"]))
else:
config.set(self.mod_name, i, self.dict[i]["value"])
path = CONFIG_PATH + "/"
if not os.path.exists(path):
os.mkdir(path, 0700)
with open(path + self.mod_name +".cfg", 'wb') as configfile:
config.write(configfile)
self.par.log("Saved %s configuration" % self.mod_name)
self.close_button_clicked(None)
def apply_button_clicked(self, btn):
(module, enabled) = self.par.modules[self.mod_name]
module.set_config_dict(self.dict)
class preference_window(gtk.Window):
MOD_NAME_ROW = 0
MOD_ENABLE_ROW = 1
MOD_RESET_ROW = 2
MOD_CONFIG_ROW = 3
def __init__(self, parent):
self.par = parent
gtk.Window.__init__(self)
self.set_title("Preferences")
self.set_default_size(300, 400)
#self.set_property("modal", True)
self.module_liststore = gtk.ListStore(str, bool, bool, bool)
notebook = gtk.Notebook()
module_treeview = gtk.TreeView()
module_treeview.set_model(self.module_liststore)
module_treeview.set_headers_visible(True)
column = gtk.TreeViewColumn()
column.set_title("Module")
render_text = gtk.CellRendererText()
column.pack_start(render_text, expand=True)
column.add_attribute(render_text, 'text', self.MOD_NAME_ROW)
module_treeview.append_column(column)
column = gtk.TreeViewColumn()
column.set_title("Enabled")
render_toggle = gtk.CellRendererToggle()
render_toggle.set_property('activatable', True)
render_toggle.connect('toggled', self.toggle_callback, self.module_liststore)
column.pack_start(render_toggle, expand=False)
column.add_attribute(render_toggle, "active", self.MOD_ENABLE_ROW)
module_treeview.append_column(column)
column = gtk.TreeViewColumn()
column.set_title("Reset")
render_toggle = gtk.CellRendererToggle()
render_toggle.set_property('activatable', True)
render_toggle.set_property('radio', True)
render_toggle.connect('toggled', self.reset_callback, self.module_liststore)
column.pack_start(render_toggle, expand=False)
column.add_attribute(render_toggle, 'active', self.MOD_RESET_ROW)
module_treeview.append_column(column)
column = gtk.TreeViewColumn()
column.set_title("Config")
render_toggle = gtk.CellRendererToggle()
render_toggle.set_property('activatable', True)
render_toggle.set_property('radio', True)
render_toggle.connect('toggled', self.config_callback, self.module_liststore)
column.pack_start(render_toggle, expand=False)
column.add_attribute(render_toggle, 'active', self.MOD_CONFIG_ROW)
module_treeview.append_column(column)
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_property("vscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.set_property("hscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.add_with_viewport(module_treeview)
notebook.append_page(scrolledwindow, tab_label=gtk.Label("Modules"))
vbox = gtk.VBox(False, 0)
filechooser = gtk.FileChooserButton("Select a Wordlist")
if not self.par.wordlist is None:
filechooser.set_filename(self.par.wordlist)
filechooser.connect('file-set', self.wordlist_callback)
frame = gtk.Frame("Wordlist")
frame.add(filechooser)
vbox.pack_start(frame, expand=False, fill=False)
vbox2 = gtk.VBox()
bf_checkbutton = gtk.CheckButton("Use Bruteforce")
bf_checkbutton.set_active(self.par.bruteforce)
bf_checkbutton.connect('toggled', self.bf_callback)
bf_full_checkbutton = gtk.CheckButton("Use full Charset")
bf_full_checkbutton.set_active(self.par.bruteforce_full)
bf_full_checkbutton.connect('toggled', self.bf_full_callback)
vbox2.pack_start(bf_checkbutton)
vbox2.pack_start(bf_full_checkbutton)
frame = gtk.Frame("Bruteforce")
frame.add(vbox2)
vbox.pack_start(frame, expand=False, fill=False)
threads_spinbutton = gtk.SpinButton()
threads_spinbutton.set_range(1, 1024)
threads_spinbutton.set_value(self.par.bruteforce_threads)
threads_spinbutton.set_increments(1, 16)
threads_spinbutton.set_numeric(True)
threads_spinbutton.connect('value-changed', self.threads_callback)
frame = gtk.Frame("Threads")
frame.add(threads_spinbutton)
vbox.pack_start(frame, expand=False, fill=False)
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_property("vscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.set_property("hscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.add_with_viewport(vbox)
notebook.append_page(scrolledwindow, tab_label=gtk.Label("Bruteforce"))
vbox = gtk.VBox(False, 0)
combo = gtk.combo_box_new_text()
for i in self.par.dot_prog_choices:
combo.insert_text(0, i)
if i == self.par.dot_prog:
combo.set_active(0)
combo.connect('changed', self.dot_callback)
frame = gtk.Frame("Graph Layout")
frame.add(combo)
vbox.pack_start(frame, expand=False, fill=False)
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_property("vscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.set_property("hscrollbar-policy", gtk.POLICY_AUTOMATIC)
scrolledwindow.add_with_viewport(vbox)
notebook.append_page(scrolledwindow, tab_label=gtk.Label("Graph"))
vbox = gtk.VBox(False, 0)
vbox.pack_start(notebook, True, True, 0)
buttonbox = gtk.HButtonBox()
close = gtk.Button(gtk.STOCK_CLOSE)
close.set_use_stock(True)
close.connect_object("clicked", self.close_button_clicked, None)
buttonbox.pack_start(close)
vbox.pack_start(buttonbox, False, False, 0)
self.add(vbox)
modlist = self.par.modules.keys()
modlist.sort()
try:
for i in modlist:
(module, enabled) = self.par.modules[i]
if "get_config_dict" in dir(module) and "set_config_dict" in dir(module):
self.module_liststore.append([i, enabled, False, False])
else:
self.module_liststore.append([i, enabled, False, True])
except Exception, e:
print e
if DEBUG:
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
print "failed to open module %s" % module
def dot_callback(self, combo):
self.par.dot_prog = combo.get_active_text()
def wordlist_callback(self, button):
self.par.wordlist = button.get_filename()
def bf_callback(self, button):
self.par.bruteforce = button.get_active()
def bf_full_callback(self, button):
self.par.bruteforce_full = button.get_active()
def threads_callback(self, button):
        self.par.bruteforce_threads = button.get_value_as_int()
return True
def toggle_callback(self, cell, path, model):
model[path][self.MOD_ENABLE_ROW] = not model[path][self.MOD_ENABLE_ROW]
if model[path][self.MOD_ENABLE_ROW]:
self.par.init_module(model[path][self.MOD_NAME_ROW])
else:
self.par.shut_module(model[path][self.MOD_NAME_ROW])
def reset_callback(self, cell, path, model):
model[path][self.MOD_RESET_ROW] = not model[path][self.MOD_RESET_ROW]
if cell:
gobject.timeout_add(750, self.reset_callback, None, path, model)
self.par.shut_module(model[path][self.MOD_NAME_ROW])
self.par.load_module(model[path][self.MOD_NAME_ROW], model[path][self.MOD_ENABLE_ROW])
(module, enabled) = self.par.modules[model[path][self.MOD_NAME_ROW]]
if enabled:
self.par.init_module(model[path][self.MOD_NAME_ROW])
return False
def config_callback(self, cell, path, model):
if not model[path][self.MOD_CONFIG_ROW]:
name = model[path][self.MOD_NAME_ROW]
(module, enabled) = self.par.modules[name]
try:
dict = module.get_config_dict()
except Exception, e:
print e
if DEBUG:
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
print "failed to load conf-dict from %s" % module
dict = None
wnd = module_preferences_window(self.par, name, dict)
wnd.show_all()
def close_button_clicked(self, arg):
gtk.Widget.destroy(self)
class loki_gtk(loki.codename_loki):
def __init__(self):
loki.codename_loki.__init__(self)
self.ui = 'gtk'
#gtk stuff
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title(self.__class__.__name__)
self.window.set_default_size(800, 600)
#connect signal handlers
self.window.connect("delete_event", self.delete_event)
self.window.connect("destroy", self.destroy_event)
self.toolbar = gtk.Toolbar()
self.toolbar.set_tooltips(True)
self.quit_button = gtk.ToolButton(gtk.STOCK_QUIT)
self.quit_button.connect("clicked", self.on_quit_button_clicked)
self.quit_button.set_tooltip_text("QUIT")
self.toolbar.insert(self.quit_button, 0)
self.about_button = gtk.ToolButton(gtk.STOCK_ABOUT)
self.about_button.connect("clicked", self.on_about_button_clicked)
self.about_button.set_tooltip_text("ABOUT")
self.toolbar.insert(self.about_button, 0)
self.log_button = gtk.ToolButton(gtk.STOCK_EDIT)
self.log_button.connect("clicked", self.on_log_button_clicked)
self.log_button.set_tooltip_text("LOG")
self.toolbar.insert(self.log_button, 0)
self.toolbar.insert(gtk.SeparatorToolItem(), 0)
self.pref_button = gtk.ToolButton(gtk.STOCK_PREFERENCES)
self.pref_button.connect("clicked", self.on_pref_button_clicked)
self.pref_button.set_tooltip_text("PREFERENCES")
self.toolbar.insert(self.pref_button, 0)
self.network_button = gtk.ToolButton(gtk.STOCK_NETWORK)
self.network_button.connect("clicked", self.on_network_button_clicked)
self.network_button.set_tooltip_text("NETWORK")
self.toolbar.insert(self.network_button, 0)
self.toolbar.insert(gtk.SeparatorToolItem(), 0)
self.open_togglebutton = gtk.ToggleToolButton(gtk.STOCK_OPEN)
self.open_togglebutton.connect("toggled", self.on_open_togglebutton_toggled)
self.open_togglebutton.set_tooltip_text("OPEN")
self.toolbar.insert(self.open_togglebutton, 0)
self.run_togglebutton = gtk.ToggleToolButton(gtk.STOCK_EXECUTE)
self.run_togglebutton.connect("toggled", self.on_run_togglebutton_toogled)
self.run_togglebutton.set_tooltip_text("RUN")
self.toolbar.insert(self.run_togglebutton, 0)
self.vbox = gtk.VBox(False, 0)
self.vbox.pack_start(self.toolbar, False, False, 0)
self.notebook = gtk.Notebook()
self.vbox.pack_start(self.notebook, True, True, 0)
self.statusbar = gtk.Statusbar()
self.vbox.pack_start(self.statusbar, False, False, 0)
self.window.add(self.vbox)
self.log_textbuffer = gtk.TextBuffer()
self.log_window = log_window(self.log_textbuffer)
def main(self):
loki.codename_loki.main(self)
self.window.show_all()
gtk.main()
def load_all_modules(self, path=loki.DATA_DIR + loki.MODULE_PATH):
loki.codename_loki.load_all_modules(self, path)
for i in self.modules.keys():
if not "get_root" in dir(self.modules[i][0]):
del self.modules[i]
def init_module_ui(self, mod):
try:
root = mod.get_root()
except Exception, e:
print e
if DEBUG:
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
print "failed to get root from module %s" % mod
root = None
if not root:
root = gtk.Label(mod.name)
group = getattr(mod, "group", "")
if group != "":
if group in self.groups:
ntb = self.groups[group]
else:
ntb = gtk.Notebook()
self.groups[group] = ntb
self.notebook.insert_page(ntb, gtk.Label(group), 0)
if root.get_parent():
root.reparent(ntb)
ntb.set_tab_label(root, gtk.Label(mod.name))
ntb.reorder_child(root, -1)
else:
ntb.insert_page(root, gtk.Label(mod.name), -1)
ntb.show_all()
else:
if root.get_parent():
root.reparent(self.notebook)
self.notebook.set_tab_label(root, gtk.Label(mod.name))
self.notebook.reorder_child(root, -1)
else:
self.notebook.insert_page(root, gtk.Label(mod.name), -1)
if self.run_togglebutton.get_active():
self.start_module(mod)
root.set_property("sensitive", True)
else:
root.set_property("sensitive", False)
def shut_module_ui(self, mod):
for i in self.groups:
ntb = self.groups[i]
for j in ntb:
if ntb.get_tab_label_text(j) == mod.name:
pos = ntb.page_num(j)
ntb.remove_page(pos)
#~ if ntb.get_n_pages() == 0:
#~ pos = self.notebook.page_num(ntb)
#~ self.notebook.remove_page(pos)
#~ del self.groups[i]
#~ print self.groups
break
else:
continue
break
for i in self.notebook:
if self.notebook.get_tab_label_text(i) == mod.name:
pos = self.notebook.page_num(i)
self.notebook.remove_page(pos)
break
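    # log() pushes messages to the statusbar and the log buffer and briefly flashes
    # the tab label of the module that produced the message.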
def log(self, msg, module=None):
#if not gtk.Object.flags(self.statusbar) & gtk.IN_DESTRUCTION:
self.statusbar.push(self.msg_id, "[%i] %s" % (self.msg_id, msg))
if DEBUG:
print "[%i] %s" % (self.msg_id, msg)
self.log_textbuffer.insert(self.log_textbuffer.get_end_iter(), "[%i] %s\n" % (self.msg_id, msg))
self.msg_id += 1
if module:
if module not in self.module_active:
for i in self.notebook:
if self.notebook.get_tab_label_text(i) == module:
if self.notebook.page_num(i) == self.notebook.get_current_page():
break
self.module_active.append(module)
self.flash_label(module, self.notebook.get_tab_label(i), 5)
break
for i in self.groups:
ntb = self.groups[i]
for j in ntb:
if ntb.get_tab_label_text(j) == module:
if self.notebook.page_num(ntb) == self.notebook.get_current_page() and ntb.page_num(j) == ntb.get_current_page():
break
self.module_active.append(module)
self.flash_label(module, self.notebook.get_tab_label(ntb), 5)
self.module_active.append(self.notebook.get_tab_label_text(ntb))
self.flash_label(self.notebook.get_tab_label_text(ntb), ntb.get_tab_label(j), 5)
break
else:
continue
break
def flash_label(self, module, label, times):
if times > 0:
if label.get_property("sensitive"):
label.set_property("sensitive", False)
gobject.timeout_add(500, self.flash_label, module, label, times)
else:
label.set_property("sensitive", True)
gobject.timeout_add(500, self.flash_label, module, label, times - 1)
else:
self.module_active.remove(module)
def send_msg(self, msg):
dialog = gtk.MessageDialog(self.window, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_CLOSE, msg)
label = gtk.Label(msg)
dialog.vbox.pack_start(label, True, True, 0)
dialog.run()
dialog.destroy()
def quit(self, data):
self.on_quit_button_clicked(data)
### EVENTS ###
    def on_run_togglebutton_toggled(self, btn):
if btn.get_active():
if not self.configured:
self.on_network_button_clicked(None)
if not self.configured:
btn.set_active(False)
return
self.pcap_thread = loki.pcap_thread(self, self.interface)
self.dnet_thread = loki.dnet_thread(self.interface)
self.log("Listening on %s" % (self.interface))
if PLATFORM != "Linux":
self.fw = dnet.fw()
for i in self.modules:
self.start_module(i)
for i in self.notebook:
if self.notebook.get_tab_label_text(i) in self.groups:
ntb = self.groups[self.notebook.get_tab_label_text(i)]
for j in ntb:
j.set_property("sensitive", True)
i.set_property("sensitive", True)
self.network_button.set_property("sensitive", False)
self.open_togglebutton.set_property("sensitive", False)
self.dnet_thread.start()
self.pcap_thread.start()
else:
for i in self.modules:
(mod, en) = self.modules[i]
mod.shut_mod()
for i in self.notebook:
if self.notebook.get_tab_label_text(i) in self.groups:
ntb = self.groups[self.notebook.get_tab_label_text(i)]
for j in ntb:
j.set_property("sensitive", False)
i.set_property("sensitive", False)
if self.pcap_thread:
self.pcap_thread.quit()
self.pcap_thread = None
if self.dnet_thread:
self.dnet_thread.quit()
self.dnet_thread = None
self.network_button.set_property("sensitive", True)
self.open_togglebutton.set_property("sensitive", True)
def on_open_togglebutton_toggled(self, btn):
if btn.get_active():
dialog = gtk.FileChooserDialog(title="Open", parent=self.window, action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
#dialog.set_current_folder()
filter = gtk.FileFilter()
filter.set_name("Pcap files")
filter.add_pattern("*.cap")
filter.add_pattern("*.pcap")
dialog.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.pcap_thread = loki.pcap_thread_offline(self, dialog.get_filename())
self.interface = "null"
self.ip = "0.0.0.0"
self.mask = "0.0.0.0"
self.ip6 = "::"
self.mask6 = "::"
self.ip6_ll = "::"
self.mask6_ll = "::"
for i in self.modules:
self.start_module(i)
for i in self.notebook:
if self.notebook.get_tab_label_text(i) in self.groups:
ntb = self.groups[self.notebook.get_tab_label_text(i)]
for j in ntb:
j.set_property("sensitive", True)
i.set_property("sensitive", True)
self.run_togglebutton.set_property("sensitive", False)
self.pcap_thread.start()
else:
btn.set_active(False)
dialog.destroy()
else:
for i in self.modules:
(mod, en) = self.modules[i]
mod.shut_mod()
for i in self.notebook:
if self.notebook.get_tab_label_text(i) in self.groups:
ntb = self.groups[self.notebook.get_tab_label_text(i)]
for j in ntb:
j.set_property("sensitive", False)
i.set_property("sensitive", False)
if self.pcap_thread:
self.pcap_thread.quit()
self.pcap_thread = None
self.run_togglebutton.set_property("sensitive", True)
def on_pref_button_clicked(self, data):
pref_window = preference_window(self)
pref_window.show_all()
def on_log_button_clicked(self, data):
l_window = log_window(self.log_textbuffer)
l_window.show_all()
def on_network_combobox_changed(self, box, label):
if PLATFORM == "Windows":
descr = box.get_active_text()
dev = None
for i in self.devices:
if self.devices[i]['descr'] == descr:
dev = i
assert(dev != None)
else:
dev = box.get_active_text()
str = ""
if dev:
if len(self.devices[dev]['ip4']) > 0:
str += "\nIPv4:"
for i in self.devices[dev]['ip4']:
str += "\n\t%s\n\t\t%s" % (i['ip'], i['mask'])
else:
str += "\nNo IPv4 Address"
if len(self.devices[dev]['ip6']) > 0:
str += "\nIPv6:"
for i in self.devices[dev]['ip6']:
str += "\n\t%s\n\t\t%s" % (i['ip'], i['mask'])
else:
str += "\nNo IPv6 Address"
label.set_text(str)
def on_advanced_network_button_clicked(self, data):
if PLATFORM == "Linux":
self.netcfg = network_window(self)
if os.path.exists(CONFIG_PATH + "/network.cfg"):
self.netcfg.load_config(CONFIG_PATH + "/network.cfg")
self.netcfg.window.show_all()
def on_network_button_clicked(self, data):
self.update_devices()
dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, "Select the interface to use")
box = gtk.combo_box_new_text()
for dev in self.devices:
if PLATFORM == "Windows":
box.append_text(self.devices[dev]['descr'])
else:
box.append_text(dev)
dialog.vbox.pack_start(box)
label = gtk.Label()
dialog.vbox.pack_start(label)
box.connect('changed', self.on_network_combobox_changed, label)
button = gtk.Button("Advanced Interface Config")
dialog.vbox.pack_start(button)
button.connect('clicked', self.on_advanced_network_button_clicked)
if PLATFORM != "Linux":
button.set_property("sensitive", False)
dialog.vbox.show_all()
box.set_active(0)
ret = dialog.run()
dialog.destroy()
if ret == gtk.RESPONSE_OK:
if PLATFORM == "Windows":
self.interface = None
descr = box.get_active_text()
for i in self.devices:
if self.devices[i]['descr'] == descr:
self.interface = i
assert(self.interface != None)
else:
self.interface = box.get_active_text()
select4 = len(self.devices[self.interface]['ip4']) > 1
if select4:
dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, "Select the interface to use")
label = gtk.Label("Select the IPv4 address to use:")
dialog.vbox.pack_start(label)
box4 = gtk.combo_box_new_text()
for i in self.devices[self.interface]['ip4']:
box4.append_text("%s %s" % (i['ip'], i['mask']))
dialog.vbox.pack_start(box4)
box4.set_active(0)
dialog.vbox.show_all()
ret = dialog.run()
dialog.destroy()
if ret != gtk.RESPONSE_OK:
return
active = box4.get_active()
self.ip = self.devices[self.interface]['ip4'][active]['ip']
self.mask = self.devices[self.interface]['ip4'][active]['mask']
else:
if len(self.devices[self.interface]['ip4']) > 0:
self.ip = self.devices[self.interface]['ip4'][0]['ip']
self.mask = self.devices[self.interface]['ip4'][0]['mask']
else:
self.ip = "0.0.0.0"
self.mask ="0.0.0.0"
select6 = len(self.devices[self.interface]['ip6']) > 1
v6done = False
if select6:
nl = 0
ip = None
mask = None
for i in self.devices[self.interface]['ip6']:
if i['linklocal']:
self.ip6_ll = i['ip']
self.mask6_ll = i['mask']
else:
ip = i['ip']
mask = i['mask']
nl += 1
if nl > 1:
dialog = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, "Select the interface to use")
label = gtk.Label("Select the IPv6 address to use:")
dialog.vbox.pack_start(label)
box6 = gtk.combo_box_new_text()
for i in self.devices[self.interface]['ip6']:
if not i['linklocal']:
box6.append_text("%s %s" % (i['ip'], i['mask']))
dialog.vbox.pack_start(box6)
box6.set_active(0)
dialog.vbox.show_all()
ret = dialog.run()
dialog.destroy()
if ret != gtk.RESPONSE_OK:
return
active = box6.get_active()
self.ip6 = self.devices[self.interface]['ip6'][active]['ip']
self.mask6 = self.devices[self.interface]['ip6'][active]['mask']
if self.ip6.startswith("fe80:"):
self.ip6_ll = self.ip6
self.mask6_ll = self.mask6
else:
self.ip6 = ip
self.mask6 = mask
select6 = False
v6done = True
else:
if not v6done:
if len(self.devices[self.interface]['ip6']) > 0:
self.ip6 = self.devices[self.interface]['ip6'][0]['ip']
self.mask6 = self.devices[self.interface]['ip6'][0]['mask']
if self.ip6.startswith("fe80:"):
self.ip6_ll = self.ip6
self.mask6_ll = self.mask6
else:
self.ip6 = "::"
self.mask6 ="::"
self.ip6_ll = "::"
self.mask6_ll = "::"
self.configured = True
def on_about_button_clicked(self, data):
window = about_window(self)
window.show_all()
def on_quit_button_clicked(self, data, foo=None):
self.delete_event(None, None)
self.destroy_event(None)
def delete_event(self, widget, event, data=None):
self.shutdown()
return False
def destroy_event(self, widget, data=None):
gtk.main_quit()
def error_callback(self, msg):
dialog = gtk.MessageDialog(gtk.Window(gtk.WINDOW_TOPLEVEL), gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, msg)
ret = dialog.run()
dialog.destroy()
gtk.main_quit()
def error(self, msg):
gobject.timeout_add(100, self.error_callback, msg)
gtk.main()
if __name__ == '__main__':
app = loki_gtk()
loki.pcap = app.check()
signal.signal(signal.SIGINT, app.quit)
try:
app.main()
except Exception, e:
print e
if loki.DEBUG:
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
app.shutdown()
except:
app.shutdown()
| bsd-3-clause | -6,927,535,572,498,970,000 | 43.57732 | 194 | 0.547377 | false |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/classes/reportviews.py | 1 | 38439 | # Copyright (C) 2004-2019 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
# Authors: Aric Hagberg ([email protected]),
# Pieter Swart ([email protected]),
# Dan Schult([email protected])
"""
View Classes provide node, edge and degree "views" of a graph.
Views for nodes, edges and degree are provided for all base graph classes.
A view means a read-only object that is quick to create, automatically
updated when the graph changes, and provides basic access like `n in V`,
`for n in V`, `V[n]` and sometimes set operations.
The views are read-only iterable containers that are updated as the
graph is updated. As with dicts, the graph should not be updated
while iterating through the view. Views can be iterated multiple times.
Edge and Node views also allow data attribute lookup.
The resulting attribute dict is writable as `G.edges[3, 4]['color']='red'`
Degree views allow lookup of degree values for single nodes.
Weighted degree is supported with the `weight` argument.
NodeView
========
`V = G.nodes` (or `V = G.nodes()`) allows `len(V)`, `n in V`, set
operations e.g. "G.nodes & H.nodes", and `dd = G.nodes[n]`, where
`dd` is the node data dict. Iteration is over the nodes by default.
NodeDataView
============
To iterate over (node, data) pairs, use arguments to `G.nodes()`
to create a DataView e.g. `DV = G.nodes(data='color', default='red')`.
The DataView iterates as `for n, color in DV` and allows
`(n, 'red') in DV`. Using `DV = G.nodes(data=True)`, the DataViews
use the full datadict in writeable form also allowing contain testing as
`(n, {'color': 'red'}) in VD`. DataViews allow set operations when
data attributes are hashable.
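A short illustrative session with a small path graph (using only the calls
described above):
>>> G = nx.path_graph(3)
>>> G.add_node(2, color='blue')
>>> list(G.nodes(data='color', default='red'))
[(0, 'red'), (1, 'red'), (2, 'blue')]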
DegreeView
==========
`V = G.degree` allows iteration over (node, degree) pairs as well
as lookup: `deg=V[n]`. There are many flavors of DegreeView
for In/Out/Directed/Multi. For Directed Graphs, `G.degree`
counts both in and out going edges. `G.out_degree` and
`G.in_degree` count only specific directions.
Weighted degree using edge data attributes is provide via
`V = G.degree(weight='attr_name')` where any string with the
attribute name can be used. `weight=None` is the default.
No set operations are implemented for degrees, use NodeView.
The argument `nbunch` restricts iteration to nodes in nbunch.
The DegreeView can still lookup any node even if nbunch is specified.
EdgeView
========
`V = G.edges` or `V = G.edges()` allows iteration over edges as well as
`e in V`, set operations and edge data lookup `dd = G.edges[2, 3]`.
Iteration is over 2-tuples `(u, v)` for Graph/DiGraph. For multigraphs
edges 3-tuples `(u, v, key)` are the default but 2-tuples can be obtained
via `V = G.edges(keys=False)`.
Set operations for directed graphs treat the edges as a set of 2-tuples.
For undirected graphs, 2-tuples are not a unique representation of edges.
So long as the set being compared to contains unique representations
of its edges, the set operations will act as expected. If the other
set contains both `(0, 1)` and `(1, 0)` however, the result of set
operations may contain both representations of the same edge.
EdgeDataView
============
Edge data can be reported using an EdgeDataView typically created
by calling an EdgeView: `DV = G.edges(data='weight', default=1)`.
The EdgeDataView allows iteration over edge tuples, membership checking
but no set operations.
Iteration depends on `data` and `default` and for multigraph `keys`
If `data is False` (the default) then iterate over 2-tuples `(u, v)`.
If `data is True` iterate over 3-tuples `(u, v, datadict)`.
Otherwise iterate over `(u, v, datadict.get(data, default))`.
For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key`
to create 3-tuples and 4-tuples.
The argument `nbunch` restricts edges to those incident to nodes in nbunch.
"""
from collections.abc import Mapping, Set, Iterable
import networkx as nx
__all__ = ['NodeView', 'NodeDataView',
'EdgeView', 'OutEdgeView', 'InEdgeView',
'EdgeDataView', 'OutEdgeDataView', 'InEdgeDataView',
'MultiEdgeView', 'OutMultiEdgeView', 'InMultiEdgeView',
'MultiEdgeDataView', 'OutMultiEdgeDataView', 'InMultiEdgeDataView',
'DegreeView', 'DiDegreeView', 'InDegreeView', 'OutDegreeView',
'MultiDegreeView', 'DiMultiDegreeView',
'InMultiDegreeView', 'OutMultiDegreeView']
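# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module).  It ties
# together the view classes documented in the module docstring above, using
# only public NetworkX calls; the helper itself is hypothetical and is never
# executed on import.
def _view_usage_sketch():
    G = nx.path_graph(3)                                 # 0 - 1 - 2
    assert 2 in G.nodes                                  # NodeView membership
    assert G.nodes & {1, 2, 3} == {1, 2}                 # set ops on nodes
    assert (1, 2) in G.edges                             # EdgeView membership
    assert dict(G.degree) == {0: 1, 1: 2, 2: 1}          # DegreeView iteration
    colors = dict(G.nodes(data='color', default='red'))  # NodeDataView
    assert colors[0] == 'red'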
# NodeViews
class NodeView(Mapping, Set):
"""A NodeView class to act as G.nodes for a NetworkX Graph
Set operations act on the nodes without considering data.
Iteration is over nodes. Node data can be looked up like a dict.
Use NodeDataView to iterate over node data or to specify a data
attribute for lookup. NodeDataView is created by calling the NodeView.
Parameters
----------
graph : NetworkX graph-like class
Examples
--------
>>> G = nx.path_graph(3)
>>> NV = G.nodes()
>>> 2 in NV
True
>>> for n in NV: print(n)
0
1
2
>>> assert(NV & {1, 2, 3} == {1, 2})
>>> G.add_node(2, color='blue')
>>> NV[2]
{'color': 'blue'}
>>> G.add_node(8, color='red')
>>> NDV = G.nodes(data=True)
>>> (2, NV[2]) in NDV
True
>>> for n, dd in NDV: print((n, dd.get('color', 'aqua')))
(0, 'aqua')
(1, 'aqua')
(2, 'blue')
(8, 'red')
>>> NDV[2] == NV[2]
True
>>> NVdata = G.nodes(data='color', default='aqua')
>>> (2, NVdata[2]) in NVdata
True
>>> for n, dd in NVdata: print((n, dd))
(0, 'aqua')
(1, 'aqua')
(2, 'blue')
(8, 'red')
>>> NVdata[2] == NV[2] # NVdata gets 'color', NV gets datadict
False
"""
__slots__ = '_nodes',
def __getstate__(self):
return {'_nodes': self._nodes}
def __setstate__(self, state):
self._nodes = state['_nodes']
def __init__(self, graph):
self._nodes = graph._node
# Mapping methods
def __len__(self):
return len(self._nodes)
def __iter__(self):
return iter(self._nodes)
def __getitem__(self, n):
return self._nodes[n]
# Set methods
def __contains__(self, n):
return n in self._nodes
@classmethod
def _from_iterable(cls, it):
return set(it)
# DataView method
def __call__(self, data=False, default=None):
if data is False:
return self
return NodeDataView(self._nodes, data, default)
def data(self, data=True, default=None):
if data is False:
return self
return NodeDataView(self._nodes, data, default)
def __str__(self):
return str(list(self))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, tuple(self))
class NodeDataView(Set):
"""A DataView class for nodes of a NetworkX Graph
The main use for this class is to iterate through node-data pairs.
The data can be the entire data-dictionary for each node, or it
can be a specific attribute (with default) for each node.
Set operations are enabled with NodeDataView, but don't work in
cases where the data is not hashable. Use with caution.
Typically, set operations on nodes use NodeView, not NodeDataView.
That is, they use `G.nodes` instead of `G.nodes(data='foo')`.
Parameters
==========
graph : NetworkX graph-like class
data : bool or string (default=False)
default : object (default=None)
"""
__slots__ = ('_nodes', '_data', '_default')
def __getstate__(self):
return {'_nodes': self._nodes,
'_data': self._data,
'_default': self._default}
def __setstate__(self, state):
self._nodes = state['_nodes']
self._data = state['_data']
self._default = state['_default']
def __init__(self, nodedict, data=False, default=None):
self._nodes = nodedict
self._data = data
self._default = default
@classmethod
def _from_iterable(cls, it):
try:
return set(it)
except TypeError as err:
if "unhashable" in str(err):
msg = " : Could be b/c data=True or your values are unhashable"
raise TypeError(str(err) + msg)
raise
def __len__(self):
return len(self._nodes)
def __iter__(self):
data = self._data
if data is False:
return iter(self._nodes)
if data is True:
return iter(self._nodes.items())
return ((n, dd[data] if data in dd else self._default)
for n, dd in self._nodes.items())
def __contains__(self, n):
try:
node_in = n in self._nodes
except TypeError:
n, d = n
return n in self._nodes and self[n] == d
if node_in is True:
return node_in
try:
n, d = n
except (TypeError, ValueError):
return False
return n in self._nodes and self[n] == d
def __getitem__(self, n):
ddict = self._nodes[n]
data = self._data
if data is False or data is True:
return ddict
return ddict[data] if data in ddict else self._default
def __str__(self):
return str(list(self))
def __repr__(self):
if self._data is False:
return '%s(%r)' % (self.__class__.__name__, tuple(self))
if self._data is True:
return '%s(%r)' % (self.__class__.__name__, dict(self))
return '%s(%r, data=%r)' % \
(self.__class__.__name__, dict(self), self._data)
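# Editor's note (sketch, not in the original source): set operations on a
# NodeDataView only behave well when the reported values are hashable, e.g. a
# single attribute requested with data='color'.  With data=True the values are
# dicts and _from_iterable raises the TypeError hinted at above.  A minimal
# illustration, never called on import:
def _nodedataview_hashability_sketch():
    G = nx.path_graph(2)
    G.add_node(0, color='blue')
    hashable_view = G.nodes(data='color', default='red')
    assert set(hashable_view) == {(0, 'blue'), (1, 'red')}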
# DegreeViews
class DiDegreeView(object):
"""A View class for degree of nodes in a NetworkX Graph
The functionality is like dict.items() with (node, degree) pairs.
Additional functionality includes read-only lookup of node degree,
and calling with optional features nbunch (for only a subset of nodes)
and weight (use edge weights to compute degree).
Parameters
==========
graph : NetworkX graph-like class
nbunch : node, container of nodes, or None meaning all nodes (default=None)
weight : bool or string (default=None)
Notes
-----
DegreeView can still lookup any node even if nbunch is specified.
Examples
--------
>>> G = nx.path_graph(3)
>>> DV = G.degree()
>>> assert(DV[2] == 1)
>>> assert(sum(deg for n, deg in DV) == 4)
>>> DVweight = G.degree(weight="span")
>>> G.add_edge(1, 2, span=34)
>>> DVweight[2]
34
>>> DVweight[0] # default edge weight is 1
1
>>> sum(span for n, span in DVweight) # sum weighted degrees
70
>>> DVnbunch = G.degree(nbunch=(1, 2))
>>> assert(len(list(DVnbunch)) == 2) # iteration over nbunch only
"""
def __init__(self, G, nbunch=None, weight=None):
self._graph = G
self._succ = G._succ if hasattr(G, "_succ") else G._adj
self._pred = G._pred if hasattr(G, "_pred") else G._adj
self._nodes = self._succ if nbunch is None \
else list(G.nbunch_iter(nbunch))
self._weight = weight
def __call__(self, nbunch=None, weight=None):
if nbunch is None:
if weight == self._weight:
return self
return self.__class__(self._graph, None, weight)
try:
if nbunch in self._nodes:
if weight == self._weight:
return self[nbunch]
return self.__class__(self._graph, None, weight)[nbunch]
except TypeError:
pass
return self.__class__(self._graph, nbunch, weight)
def __getitem__(self, n):
weight = self._weight
succs = self._succ[n]
preds = self._pred[n]
if weight is None:
return len(succs) + len(preds)
return sum(dd.get(weight, 1) for dd in succs.values()) + \
sum(dd.get(weight, 1) for dd in preds.values())
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
succs = self._succ[n]
preds = self._pred[n]
yield (n, len(succs) + len(preds))
else:
for n in self._nodes:
succs = self._succ[n]
preds = self._pred[n]
deg = sum(dd.get(weight, 1) for dd in succs.values()) \
+ sum(dd.get(weight, 1) for dd in preds.values())
yield (n, deg)
def __len__(self):
return len(self._nodes)
def __str__(self):
return str(list(self))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self))
class DegreeView(DiDegreeView):
"""A DegreeView class to act as G.degree for a NetworkX Graph
Typical usage focuses on iteration over `(node, degree)` pairs.
The degree is by default the number of edges incident to the node.
Optional argument `weight` enables weighted degree using the edge
attribute named in the `weight` argument. Reporting and iteration
can also be restricted to a subset of nodes using `nbunch`.
Additional functionality includes node lookup so that `G.degree[n]`
reports the (possibly weighted) degree of node `n`. Calling the
view creates a view with different arguments `nbunch` or `weight`.
Parameters
==========
graph : NetworkX graph-like class
nbunch : node, container of nodes, or None meaning all nodes (default=None)
weight : string or None (default=None)
Notes
-----
DegreeView can still lookup any node even if nbunch is specified.
Examples
--------
>>> G = nx.path_graph(3)
>>> DV = G.degree()
>>> assert(DV[2] == 1)
>>> assert(G.degree[2] == 1)
>>> assert(sum(deg for n, deg in DV) == 4)
>>> DVweight = G.degree(weight="span")
>>> G.add_edge(1, 2, span=34)
>>> DVweight[2]
34
>>> DVweight[0] # default edge weight is 1
1
>>> sum(span for n, span in DVweight) # sum weighted degrees
70
>>> DVnbunch = G.degree(nbunch=(1, 2))
>>> assert(len(list(DVnbunch)) == 2) # iteration over nbunch only
"""
def __getitem__(self, n):
weight = self._weight
nbrs = self._succ[n]
if weight is None:
return len(nbrs) + (n in nbrs)
return sum(dd.get(weight, 1) for dd in nbrs.values()) + \
(n in nbrs and nbrs[n].get(weight, 1))
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
nbrs = self._succ[n]
yield (n, len(nbrs) + (n in nbrs))
else:
for n in self._nodes:
nbrs = self._succ[n]
deg = sum(dd.get(weight, 1) for dd in nbrs.values()) + \
(n in nbrs and nbrs[n].get(weight, 1))
yield (n, deg)
class OutDegreeView(DiDegreeView):
"""A DegreeView class to report out_degree for a DiGraph; See DegreeView"""
def __getitem__(self, n):
weight = self._weight
nbrs = self._succ[n]
if self._weight is None:
return len(nbrs)
return sum(dd.get(self._weight, 1) for dd in nbrs.values())
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
succs = self._succ[n]
yield (n, len(succs))
else:
for n in self._nodes:
succs = self._succ[n]
deg = sum(dd.get(weight, 1) for dd in succs.values())
yield (n, deg)
class InDegreeView(DiDegreeView):
"""A DegreeView class to report in_degree for a DiGraph; See DegreeView"""
def __getitem__(self, n):
weight = self._weight
nbrs = self._pred[n]
if weight is None:
return len(nbrs)
return sum(dd.get(weight, 1) for dd in nbrs.values())
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
preds = self._pred[n]
yield (n, len(preds))
else:
for n in self._nodes:
preds = self._pred[n]
deg = sum(dd.get(weight, 1) for dd in preds.values())
yield (n, deg)
class MultiDegreeView(DiDegreeView):
"""A DegreeView class for undirected multigraphs; See DegreeView"""
def __getitem__(self, n):
weight = self._weight
nbrs = self._succ[n]
if weight is None:
return sum(len(keys) for keys in nbrs.values()) + \
(n in nbrs and len(nbrs[n]))
# edge weighted graph - degree is sum of nbr edge weights
deg = sum(d.get(weight, 1) for key_dict in nbrs.values()
for d in key_dict.values())
if n in nbrs:
deg += sum(d.get(weight, 1) for d in nbrs[n].values())
return deg
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
nbrs = self._succ[n]
deg = sum(len(keys) for keys in nbrs.values()) + \
(n in nbrs and len(nbrs[n]))
yield (n, deg)
else:
for n in self._nodes:
nbrs = self._succ[n]
deg = sum(d.get(weight, 1) for key_dict in nbrs.values()
for d in key_dict.values())
if n in nbrs:
deg += sum(d.get(weight, 1) for d in nbrs[n].values())
yield (n, deg)
class DiMultiDegreeView(DiDegreeView):
"""A DegreeView class for MultiDiGraph; See DegreeView"""
def __getitem__(self, n):
weight = self._weight
succs = self._succ[n]
preds = self._pred[n]
if weight is None:
return sum(len(keys) for keys in succs.values()) + \
sum(len(keys) for keys in preds.values())
# edge weighted graph - degree is sum of nbr edge weights
deg = sum(d.get(weight, 1) for key_dict in succs.values()
for d in key_dict.values()) + \
sum(d.get(weight, 1) for key_dict in preds.values()
for d in key_dict.values())
return deg
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
succs = self._succ[n]
preds = self._pred[n]
deg = sum(len(keys) for keys in succs.values()) + \
sum(len(keys) for keys in preds.values())
yield (n, deg)
else:
for n in self._nodes:
succs = self._succ[n]
preds = self._pred[n]
deg = sum(d.get(weight, 1) for key_dict in succs.values()
for d in key_dict.values()) + \
sum(d.get(weight, 1) for key_dict in preds.values()
for d in key_dict.values())
yield (n, deg)
class InMultiDegreeView(DiDegreeView):
"""A DegreeView class for inward degree of MultiDiGraph; See DegreeView"""
def __getitem__(self, n):
weight = self._weight
nbrs = self._pred[n]
if weight is None:
return sum(len(data) for data in nbrs.values())
# edge weighted graph - degree is sum of nbr edge weights
return sum(d.get(weight, 1) for key_dict in nbrs.values()
for d in key_dict.values())
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
nbrs = self._pred[n]
deg = sum(len(data) for data in nbrs.values())
yield (n, deg)
else:
for n in self._nodes:
nbrs = self._pred[n]
deg = sum(d.get(weight, 1) for key_dict in nbrs.values()
for d in key_dict.values())
yield (n, deg)
class OutMultiDegreeView(DiDegreeView):
"""A DegreeView class for outward degree of MultiDiGraph; See DegreeView"""
def __getitem__(self, n):
weight = self._weight
nbrs = self._succ[n]
if weight is None:
return sum(len(data) for data in nbrs.values())
# edge weighted graph - degree is sum of nbr edge weights
return sum(d.get(weight, 1) for key_dict in nbrs.values()
for d in key_dict.values())
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
nbrs = self._succ[n]
deg = sum(len(data) for data in nbrs.values())
yield (n, deg)
else:
for n in self._nodes:
nbrs = self._succ[n]
deg = sum(d.get(weight, 1) for key_dict in nbrs.values()
for d in key_dict.values())
yield (n, deg)
# EdgeDataViews
class OutEdgeDataView(object):
"""EdgeDataView for outward edges of DiGraph; See EdgeDataView"""
__slots__ = ('_viewer', '_nbunch', '_data', '_default',
'_adjdict', '_nodes_nbrs', '_report')
def __getstate__(self):
return {'viewer': self._viewer,
'nbunch': self._nbunch,
'data': self._data,
'default': self._default}
def __setstate__(self, state):
self.__init__(**state)
def __init__(self, viewer, nbunch=None, data=False, default=None):
self._viewer = viewer
adjdict = self._adjdict = viewer._adjdict
if nbunch is None:
self._nodes_nbrs = adjdict.items
else:
nbunch = list(viewer._graph.nbunch_iter(nbunch))
self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch]
self._nbunch = nbunch
self._data = data
self._default = default
# Set _report based on data and default
if data is True:
self._report = lambda n, nbr, dd: (n, nbr, dd)
elif data is False:
self._report = lambda n, nbr, dd: (n, nbr)
else: # data is attribute name
self._report = lambda n, nbr, dd: \
(n, nbr, dd[data]) if data in dd else (n, nbr, default)
def __len__(self):
return sum(len(nbrs) for n, nbrs in self._nodes_nbrs())
def __iter__(self):
return (self._report(n, nbr, dd) for n, nbrs in self._nodes_nbrs()
for nbr, dd in nbrs.items())
def __contains__(self, e):
try:
u, v = e[:2]
ddict = self._adjdict[u][v]
except KeyError:
return False
return e == self._report(u, v, ddict)
def __str__(self):
return str(list(self))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self))
class EdgeDataView(OutEdgeDataView):
"""A EdgeDataView class for edges of Graph
This view is primarily used to iterate over the edges reporting
edges as node-tuples with edge data optionally reported. The
argument `nbunch` allows restriction to edges incident to nodes
in that container/singleton. The default (nbunch=None)
reports all edges. The arguments `data` and `default` control
what edge data is reported. The default `data is False` reports
only node-tuples for each edge. If `data is True` the entire edge
data dict is returned. Otherwise `data` is assumed to hold the name
of the edge attribute to report with default `default` if that
edge attribute is not present.
Parameters
----------
nbunch : container of nodes, node or None (default None)
data : False, True or string (default False)
default : default value (default None)
Examples
--------
>>> G = nx.path_graph(3)
>>> G.add_edge(1, 2, foo='bar')
>>> list(G.edges(data='foo', default='biz'))
[(0, 1, 'biz'), (1, 2, 'bar')]
>>> assert((0, 1, 'biz') in G.edges(data='foo', default='biz'))
"""
__slots__ = ()
def __len__(self):
return sum(1 for e in self)
def __iter__(self):
seen = {}
for n, nbrs in self._nodes_nbrs():
for nbr, dd in nbrs.items():
if nbr not in seen:
yield self._report(n, nbr, dd)
seen[n] = 1
del seen
def __contains__(self, e):
try:
u, v = e[:2]
ddict = self._adjdict[u][v]
except KeyError:
try:
ddict = self._adjdict[v][u]
except KeyError:
return False
return e == self._report(u, v, ddict)
class InEdgeDataView(OutEdgeDataView):
"""An EdgeDataView class for outward edges of DiGraph; See EdgeDataView"""
__slots__ = ()
def __iter__(self):
return (self._report(nbr, n, dd) for n, nbrs in self._nodes_nbrs()
for nbr, dd in nbrs.items())
def __contains__(self, e):
try:
u, v = e[:2]
ddict = self._adjdict[v][u]
except KeyError:
return False
return e == self._report(u, v, ddict)
class OutMultiEdgeDataView(OutEdgeDataView):
"""An EdgeDataView for outward edges of MultiDiGraph; See EdgeDataView"""
__slots__ = ('keys',)
def __getstate__(self):
return {'viewer': self._viewer,
'nbunch': self._nbunch,
'keys': self.keys,
'data': self._data,
'default': self._default}
def __setstate__(self, state):
self.__init__(**state)
def __init__(self, viewer, nbunch=None,
data=False, keys=False, default=None):
self._viewer = viewer
adjdict = self._adjdict = viewer._adjdict
self.keys = keys
if nbunch is None:
self._nodes_nbrs = adjdict.items
else:
nbunch = list(viewer._graph.nbunch_iter(nbunch))
self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch]
self._nbunch = nbunch
self._data = data
self._default = default
# Set _report based on data and default
if data is True:
if keys is True:
self._report = lambda n, nbr, k, dd: (n, nbr, k, dd)
else:
self._report = lambda n, nbr, k, dd: (n, nbr, dd)
elif data is False:
if keys is True:
self._report = lambda n, nbr, k, dd: (n, nbr, k)
else:
self._report = lambda n, nbr, k, dd: (n, nbr)
else: # data is attribute name
if keys is True:
self._report = lambda n, nbr, k, dd: (n, nbr, k, dd[data]) \
if data in dd else (n, nbr, k, default)
else:
self._report = lambda n, nbr, k, dd: (n, nbr, dd[data]) \
if data in dd else (n, nbr, default)
def __len__(self):
return sum(1 for e in self)
def __iter__(self):
return (self._report(n, nbr, k, dd) for n, nbrs in self._nodes_nbrs()
for nbr, kd in nbrs.items() for k, dd in kd.items())
def __contains__(self, e):
u, v = e[:2]
try:
kdict = self._adjdict[u][v]
except KeyError:
return False
if self.keys is True:
k = e[2]
try:
dd = kdict[k]
except KeyError:
return False
return e == self._report(u, v, k, dd)
for k, dd in kdict.items():
if e == self._report(u, v, k, dd):
return True
return False
class MultiEdgeDataView(OutMultiEdgeDataView):
"""An EdgeDataView class for edges of MultiGraph; See EdgeDataView"""
__slots__ = ()
def __iter__(self):
seen = {}
for n, nbrs in self._nodes_nbrs():
for nbr, kd in nbrs.items():
if nbr not in seen:
for k, dd in kd.items():
yield self._report(n, nbr, k, dd)
seen[n] = 1
del seen
def __contains__(self, e):
u, v = e[:2]
try:
kdict = self._adjdict[u][v]
except KeyError:
try:
kdict = self._adjdict[v][u]
except KeyError:
return False
if self.keys is True:
k = e[2]
try:
dd = kdict[k]
except KeyError:
return False
return e == self._report(u, v, k, dd)
for k, dd in kdict.items():
if e == self._report(u, v, k, dd):
return True
return False
class InMultiEdgeDataView(OutMultiEdgeDataView):
"""An EdgeDataView for inward edges of MultiDiGraph; See EdgeDataView"""
__slots__ = ()
def __iter__(self):
return (self._report(nbr, n, k, dd) for n, nbrs in self._nodes_nbrs()
for nbr, kd in nbrs.items() for k, dd in kd.items())
def __contains__(self, e):
u, v = e[:2]
try:
kdict = self._adjdict[v][u]
except KeyError:
return False
if self.keys is True:
k = e[2]
dd = kdict[k]
return e == self._report(u, v, k, dd)
for k, dd in kdict.items():
if e == self._report(u, v, k, dd):
return True
return False
# EdgeViews have set operations and no data reported
class OutEdgeView(Set, Mapping):
"""A EdgeView class for outward edges of a DiGraph"""
__slots__ = ('_adjdict', '_graph', '_nodes_nbrs')
def __getstate__(self):
return {'_graph': self._graph}
def __setstate__(self, state):
self._graph = G = state['_graph']
self._adjdict = G._succ if hasattr(G, "succ") else G._adj
self._nodes_nbrs = self._adjdict.items
@classmethod
def _from_iterable(cls, it):
return set(it)
dataview = OutEdgeDataView
def __init__(self, G):
self._graph = G
self._adjdict = G._succ if hasattr(G, "succ") else G._adj
self._nodes_nbrs = self._adjdict.items
# Set methods
def __len__(self):
return sum(len(nbrs) for n, nbrs in self._nodes_nbrs())
def __iter__(self):
for n, nbrs in self._nodes_nbrs():
for nbr in nbrs:
yield (n, nbr)
def __contains__(self, e):
try:
u, v = e
return v in self._adjdict[u]
except KeyError:
return False
# Mapping Methods
def __getitem__(self, e):
u, v = e
return self._adjdict[u][v]
# EdgeDataView methods
def __call__(self, nbunch=None, data=False, default=None):
if nbunch is None and data is False:
return self
return self.dataview(self, nbunch, data, default)
def data(self, data=True, default=None, nbunch=None):
if nbunch is None and data is False:
return self
return self.dataview(self, nbunch, data, default)
# String Methods
def __str__(self):
return str(list(self))
def __repr__(self):
return "{0.__class__.__name__}({1!r})".format(self, list(self))
class EdgeView(OutEdgeView):
"""A EdgeView class for edges of a Graph
This densely packed View allows iteration over edges, data lookup
like a dict and set operations on edges represented by node-tuples.
In addition, edge data can be controlled by calling this object
possibly creating an EdgeDataView. Typically edges are iterated over
and reported as `(u, v)` node tuples or `(u, v, key)` node/key tuples
for multigraphs. Those edge representations can also be used to
look up the data dict for any edge. Set operations are also available
where those tuples are the elements of the set.
Calling this object with optional arguments `data`, `default` and `keys`
controls the form of the tuple (see EdgeDataView). Optional argument
`nbunch` allows restriction to edges only involving certain nodes.
If `data is False` (the default) then iterate over 2-tuples `(u, v)`.
If `data is True` iterate over 3-tuples `(u, v, datadict)`.
Otherwise iterate over `(u, v, datadict.get(data, default))`.
For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key` above.
Parameters
==========
graph : NetworkX graph-like class
nbunch : (default= all nodes in graph) only report edges with these nodes
keys : (only for MultiGraph. default=False) report edge key in tuple
data : bool or string (default=False) see above
default : object (default=None)
Examples
========
>>> G = nx.path_graph(4)
>>> EV = G.edges()
>>> (2, 3) in EV
True
>>> for u, v in EV: print((u, v))
(0, 1)
(1, 2)
(2, 3)
>>> assert(EV & {(1, 2), (3, 4)} == {(1, 2)})
>>> EVdata = G.edges(data='color', default='aqua')
>>> G.add_edge(2, 3, color='blue')
>>> assert((2, 3, 'blue') in EVdata)
>>> for u, v, c in EVdata: print("({}, {}) has color: {}".format(u, v, c))
(0, 1) has color: aqua
(1, 2) has color: aqua
(2, 3) has color: blue
>>> EVnbunch = G.edges(nbunch=2)
>>> assert((2, 3) in EVnbunch)
>>> assert((0, 1) in EVnbunch) # nbunch is ignored in __contains__
>>> for u, v in EVnbunch: assert(u == 2 or v == 2)
>>> MG = nx.path_graph(4, create_using=nx.MultiGraph)
>>> EVmulti = MG.edges(keys=True)
>>> (2, 3, 0) in EVmulti
True
>>> (2, 3) in EVmulti # 2-tuples work even when keys is True
True
>>> key = MG.add_edge(2, 3)
>>> for u, v, k in EVmulti: print((u, v, k))
(0, 1, 0)
(1, 2, 0)
(2, 3, 0)
(2, 3, 1)
"""
__slots__ = ()
dataview = EdgeDataView
def __len__(self):
num_nbrs = (len(nbrs) + (n in nbrs) for n, nbrs in self._nodes_nbrs())
return sum(num_nbrs) // 2
def __iter__(self):
seen = {}
for n, nbrs in self._nodes_nbrs():
for nbr in list(nbrs):
if nbr not in seen:
yield (n, nbr)
seen[n] = 1
del seen
def __contains__(self, e):
try:
u, v = e[:2]
return v in self._adjdict[u] or u in self._adjdict[v]
except (KeyError, ValueError):
return False
class InEdgeView(OutEdgeView):
"""A EdgeView class for inward edges of a DiGraph"""
__slots__ = ()
def __setstate__(self, state):
self._graph = G = state['_graph']
self._adjdict = G._pred if hasattr(G, "pred") else G._adj
self._nodes_nbrs = self._adjdict.items
dataview = InEdgeDataView
def __init__(self, G):
self._graph = G
self._adjdict = G._pred if hasattr(G, "pred") else G._adj
self._nodes_nbrs = self._adjdict.items
def __iter__(self):
for n, nbrs in self._nodes_nbrs():
for nbr in nbrs:
yield (nbr, n)
def __contains__(self, e):
try:
u, v = e
return u in self._adjdict[v]
except KeyError:
return False
def __getitem__(self, e):
u, v = e
return self._adjdict[v][u]
class OutMultiEdgeView(OutEdgeView):
"""A EdgeView class for outward edges of a MultiDiGraph"""
__slots__ = ()
dataview = OutMultiEdgeDataView
def __len__(self):
return sum(len(kdict) for n, nbrs in self._nodes_nbrs()
for nbr, kdict in nbrs.items())
def __iter__(self):
for n, nbrs in self._nodes_nbrs():
for nbr, kdict in nbrs.items():
for key in kdict:
yield (n, nbr, key)
def __contains__(self, e):
N = len(e)
if N == 3:
u, v, k = e
elif N == 2:
u, v = e
k = 0
else:
raise ValueError("MultiEdge must have length 2 or 3")
try:
return k in self._adjdict[u][v]
except KeyError:
return False
def __getitem__(self, e):
u, v, k = e
return self._adjdict[u][v][k]
def __call__(self, nbunch=None, data=False, keys=False, default=None):
if nbunch is None and data is False and keys is True:
return self
return self.dataview(self, nbunch, data, keys, default)
def data(self, data=True, keys=False, default=None, nbunch=None):
if nbunch is None and data is False and keys is True:
return self
return self.dataview(self, nbunch, data, keys, default)
class MultiEdgeView(OutMultiEdgeView):
"""A EdgeView class for edges of a MultiGraph"""
__slots__ = ()
dataview = MultiEdgeDataView
def __len__(self):
return sum(1 for e in self)
def __iter__(self):
seen = {}
for n, nbrs in self._nodes_nbrs():
for nbr, kd in nbrs.items():
if nbr not in seen:
for k, dd in kd.items():
yield (n, nbr, k)
seen[n] = 1
del seen
class InMultiEdgeView(OutMultiEdgeView):
"""A EdgeView class for inward edges of a MultiDiGraph"""
__slots__ = ()
def __setstate__(self, state):
self._graph = G = state['_graph']
self._adjdict = G._pred if hasattr(G, "pred") else G._adj
self._nodes_nbrs = self._adjdict.items
dataview = InMultiEdgeDataView
def __init__(self, G):
self._graph = G
self._adjdict = G._pred if hasattr(G, "pred") else G._adj
self._nodes_nbrs = self._adjdict.items
def __iter__(self):
for n, nbrs in self._nodes_nbrs():
for nbr, kdict in nbrs.items():
for key in kdict:
yield (nbr, n, key)
def __contains__(self, e):
N = len(e)
if N == 3:
u, v, k = e
elif N == 2:
u, v = e
k = 0
else:
raise ValueError("MultiEdge must have length 2 or 3")
try:
return k in self._adjdict[v][u]
except KeyError:
return False
def __getitem__(self, e):
u, v, k = e
return self._adjdict[v][u][k]
| mit | -2,740,391,334,213,308,400 | 31.63073 | 79 | 0.547829 | false |
jocave/snapcraft | snapcraft/tests/test_commands_search.py | 1 | 3029 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import fixtures
from snapcraft import main, tests
from snapcraft.tests import fixture_setup
class SearchCommandTestCase(tests.TestCase):
def test_searching_for_a_part_that_exists(self):
fake_terminal = fixture_setup.FakeTerminal()
self.useFixture(fake_terminal)
main.main(['search', 'curl'])
expected_output = """PART NAME DESCRIPTION
curl test entry for curl
"""
self.assertEqual(fake_terminal.getvalue(), expected_output)
def test_empty_search_searches_all(self):
fake_terminal = fixture_setup.FakeTerminal()
self.useFixture(fake_terminal)
main.main(['search'])
output = fake_terminal.getvalue()
self.assertEqual(
output.split('\n')[0], 'PART NAME DESCRIPTION')
self.assertTrue('part1 test entry for part1' in output)
self.assertTrue('curl test entry for curl' in output)
self.assertTrue(
'long-described-part this is a repetitive description '
'this is a repetitive de...' in output)
def test_search_trims_long_descriptions(self):
fake_terminal = fixture_setup.FakeTerminal()
self.useFixture(fake_terminal)
main.main(['search', 'long-described-part'])
expected_output = (
'PART NAME DESCRIPTION\n'
'long-described-part this is a repetitive description this is a '
'repetitive de...\n')
self.assertEqual(fake_terminal.getvalue(), expected_output)
def test_searching_for_a_part_that_doesnt_exist_helps_out(self):
self.useFixture(fixture_setup.FakeTerminal())
fake_logger = fixtures.FakeLogger(level=logging.INFO)
self.useFixture(fake_logger)
main.main(['search', 'part that does not exist'])
self.assertEqual(
fake_logger.output,
'No matches found, try to run `snapcraft update` to refresh the '
'remote parts cache.\n')
def test_search_on_non_tty(self):
fake_terminal = fixture_setup.FakeTerminal(isatty=False)
self.useFixture(fake_terminal)
main.main(['search', 'curl'])
expected_output = """PART NAME DESCRIPTION
curl test entry for curl
"""
self.assertEqual(fake_terminal.getvalue(), expected_output)
| gpl-3.0 | -5,133,452,250,119,446,000 | 33.816092 | 78 | 0.656983 | false |
bitmazk/django-libs | django_libs/settings/django_settings.py | 1 | 1334 | """Useful default settings that you might want to use in all your projects."""
import re
IGNORABLE_404_URLS = [
re.compile(r'\..*$'),
re.compile(r'^/media/'),
re.compile(r'^/static/'),
]
IGNORABLE_404_USER_AGENTS = [
re.compile(r'AhrefsBot', re.I),
re.compile(r'BacklinkCrawler', re.I),
re.compile(r'Baiduspider', re.I),
re.compile(r'bingbot', re.I),
re.compile(r'BLEXBot', re.I),
re.compile(r'Cliqzbot', re.I),
re.compile(r'coccoc', re.I),
re.compile(r'DotBot', re.I),
re.compile(r'EasouSpider', re.I),
re.compile(r'Exabot', re.I),
re.compile(r'FacebookBot', re.I),
re.compile(r'Feedfetcher-Google', re.I),
re.compile(r'Googlebot', re.I),
re.compile(r'Jorgee', re.I),
re.compile(r'Mail.RU_Bot', re.I),
re.compile(r'mindUpBot', re.I),
re.compile(r'MJ12bot', re.I),
re.compile(r'msnbot', re.I),
re.compile(r'publiclibraryarchive.org', re.I),
re.compile(r'RU_Bot', re.I),
re.compile(r'savetheworldheritage.org', re.I),
re.compile(r'seoscanners', re.I),
re.compile(r'spbot', re.I),
re.compile(r'Test Certificate Info', re.I),
re.compile(r'Twitterbot', re.I),
re.compile(r'WinHttp.WinHttpRequest.5', re.I),
re.compile(r'XoviBot', re.I),
re.compile(r'Yahoo! Slurp', re.I),
re.compile(r'YandexBot', re.I),
]
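# --- Editor's illustrative sketch (not part of the original settings file) ---
# Django's BrokenLinkEmailsMiddleware consumes IGNORABLE_404_URLS directly once
# these names are imported into a project's settings module.  The user-agent
# list is specific to this package, so a project has to apply it itself; the
# hypothetical helper below shows one way the two lists could be combined.
def _is_ignorable_404(path, user_agent):
    """Return True if a 404 for this path/user-agent should not be reported."""
    return (any(pattern.search(path) for pattern in IGNORABLE_404_URLS) or
            any(pattern.search(user_agent) for pattern in IGNORABLE_404_USER_AGENTS))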
| mit | -9,182,139,276,276,231,000 | 30.761905 | 78 | 0.61994 | false |
os-cloud-storage/openstack-workload-disaster-recovery | contrib/horizon/openstack_dashboard/dashboards/project/draas_restore/tables.py | 1 | 3037 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from horizon import messages
LOG = logging.getLogger(__name__)
DELETABLE_STATES = ("available", "error")
class RestorePolicyExecution(tables.Action):
name = "restore_execution"
verbose_name = _("Restore")
classes = ("btn-create")
def single(self, data_table, request, execution_name):
try:
dr_client = api.dragon.dragonclient(request)
LOG.debug("dragon prepare parameters: %s " % (execution_name))
dr_client.dr.recover(execution_name)
messages.success(request,
"Triggered restore of point in time: %s \n" % execution_name +
" Check Heat deployment.")
except Exception as e:
logging.exception(e)
messages.error(request, "Failed to trigger restore operation")
class SelectRestorePolicyAction(tables.LinkAction):
name = "choose_policy"
verbose_name = _("Select")
url = "horizon:project:draas_restore:policy"
classes = ("btn-launch")
def get_link_url(self, datum):
policy_name = datum['name']
return reverse(self.url, kwargs={'policy_name': policy_name})
class DRRestoreBackupsTable(tables.DataTable):
def get_object_id(self, container):
return container['name']
policy = tables.Column("name",
verbose_name=_("Policy Name"))
timestamp = tables.Column("timestamp",
verbose_name=_("Last Run"))
class Meta:
name = "backups"
verbose_name = _("Policies to Restore")
status_columns = []
row_actions = [SelectRestorePolicyAction]
class DRRestorePolicyExecutionTable(tables.DataTable):
def get_object_id(self, container):
return container['id']
execution = tables.Column('name',
verbose_name=_("Policy"))
timestamp = tables.Column('timestamp',
verbose_name=_("Timestamp"))
class Meta:
name = "backups"
verbose_name = _("Points in Time")
status_columns = []
row_actions = [RestorePolicyExecution]
| apache-2.0 | 2,222,686,669,548,437,200 | 32.373626 | 78 | 0.626934 | false |
EduPepperPDTesting/pepper2013-testing | lms/djangoapps/administration/pepreg.py | 1 | 62497 | from mitxmako.shortcuts import render_to_response, render_to_string
from django.http import HttpResponse
import json
from models import PepRegTraining, PepRegInstructor, PepRegStudent
from django import db
from django.db.models import Q  # used by build_filters() for the "search all columns" case
from datetime import datetime, timedelta, date
from pytz import UTC
from django.contrib.auth.models import User
import urllib2
from courseware.courses import (get_courses, get_course_with_access,
get_courses_by_university, sort_by_announcement)
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import user_passes_test
from permissions.utils import check_access_level, check_user_perms
from StringIO import StringIO
import xlsxwriter
from student.models import UserTestGroup, CourseEnrollment, UserProfile, District, State, School
from training.models import TrainingUsers
from xmodule.modulestore.django import modulestore
import pymongo
from django.conf import settings
import calendar
from django.utils.timezone import datetime, now, timedelta, utc
from django.utils.translation import ugettext_lazy as _
from dateutil.relativedelta import relativedelta
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from student.models import (Registration, UserProfile, TestCenterUser, TestCenterUserForm,
TestCenterRegistration, TestCenterRegistrationForm, State,
PendingNameChange, PendingEmailChange, District,
CourseEnrollment, unique_id_for_user,
get_testcenter_registration, CourseEnrollmentAllowed)
from io import BytesIO
from reportlab.lib.utils import ImageReader
from reportlab.pdfgen import canvas
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Paragraph
from reportlab.lib.pagesizes import A4
from reportlab.pdfbase.pdfmetrics import stringWidth
from xmodule.remindstore import myactivitystore
import logging
@login_required
def index(request):
# courses = get_courses(request.user, request.META.get('HTTP_HOST'))
# courses = sorted(courses, key=lambda course: course.display_name.lower())
tmp = "administration/pepreg.html";
try:
courses = get_courses_drop(request.user.profile.district.state.name, request.user.profile.district.code)
except:
tmp = "administration/pepreg_district_school_null.html";
courses = {};
return render_to_response(tmp, {"courses": courses})
def build_filters(columns, filters):
"""
Builds the filters for the PepConn report data
:param columns: the columns in this table
:param filters: the filters requested
:return: the arguments to pass to filter()
"""
kwargs = dict()
args = None
# Iterate through the filters.
for column, value in filters.iteritems():
# For the numerical columns, just filter that column by the passed value.
if not column == 'all':
c = int(column)
# If the column is an integer value, convert the search term.
out_value = value
if columns[c][2] == 'int' and value.isdigit():
out_value = int(value)
# Build the actual kwargs to pass to filer(). in this case, we need the column selector ([0]) as well as the
# type of selection to make ([1] - '__iexact').
kwargs[columns[c][0] + columns[c][1]] = out_value
# If this is a search for all, we need to do an OR search, so we build one with Q objects.
else:
args_list = list()
for key, data in columns.iteritems():
# [2] holds the column type (int, str, or False to ignore).
if data[2]:
# If the column is an integer value, convert the search term (as long as the string is only digits).
out_value = value
if data[2] == 'int':
if value.isdigit():
out_value = int(value)
else:
out_value = None
if out_value is not None:
# Create the Q object and add it to the list.
args_list.append(Q(**{data[0] + data[1]: out_value}))
# Start the list with the first object, then add the rest with ORs.
args = args_list.pop()
for item in args_list:
args |= item
return args, kwargs
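# Editor's sketch (hypothetical helper, never called): how the pieces returned
# by build_filters() are combined with a queryset, mirroring the call sites in
# rows() below.  The column map and filter values here are made up.
def _build_filters_usage_sketch():
    columns = {5: ['name', '__iexact', 'str'],
               10: ['credits', '__iexact', 'int']}
    filters = {'all': 'algebra'}        # the "search every column" case
    args, kwargs = build_filters(columns, filters)
    # args is a Q object covering the searchable text columns (OR-ed together
    # when there is more than one); int columns are skipped for a non-numeric
    # term, and kwargs stays empty because no single column was targeted.
    if args:
        return PepRegTraining.objects.filter(args, **kwargs)
    return PepRegTraining.objects.filter(**kwargs)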
def get_post_array(post, name, max=None):
"""
Gets array values from a POST.
"""
output = dict()
for key in post.keys():
value = urllib2.unquote(post.get(key))
if key.startswith(name + '[') and not value == 'undefined':
start = key.find('[')
i = key[start + 1:-1]
if max and int(i) > max:
i = 'all'
output.update({i: value})
return output
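# Editor's sketch (illustrative, values made up): get_post_array() flattens the
# DataTables-style query parameters "fcol[5]=...", "fcol[10]=..." into a dict
# keyed by column index.  request.GET / request.POST expose the same
# .keys()/.get() interface as the plain dict used here.
def _get_post_array_example():
    fake_query = {'fcol[5]': 'algebra', 'fcol[10]': '3', 'page': '0'}
    assert get_post_array(fake_query, 'fcol') == {'5': 'algebra', '10': '3'}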
def build_sorts(columns, sorts):
"""
Builds the sorts for the PepConn report data
:param columns: the columns in this table
:param sorts: the sorts requested
:return: the arguments to pass to order_by()
"""
order = list()
# Iterate through the passed sorts.
for column, sort in sorts.iteritems():
# Default to an ASC search, but if the sort is 1, change it to DESC by adding a -.
pre = ''
if bool(int(sort)):
pre = '-'
# We just need the column selector out of the columns, not the type.
order.append(pre + columns[int(column)][0])
return order
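# Editor's sketch (made-up values): sorting column 5 (the training name) in
# descending order yields the argument list passed to order_by() in rows().
def _build_sorts_example():
    columns = {5: ['name', '__iexact', 'str']}
    assert build_sorts(columns, {'5': '1'}) == ['-name']  # '0' would give 'name'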
def reach_limit(training):
return training.max_registration > 0 and PepRegStudent.objects.filter(
training=training).count() >= training.max_registration
def instructor_names(training):
names = []
for instructor in PepRegInstructor.objects.filter(training=training):
names.append("%s %s" % (instructor.instructor.first_name, instructor.instructor.last_name))
return names
def rows(request):
columns = {
1: ['district__state__name', '__iexact', 'str'],
2: ['district__name', '__iexact', 'str'],
3: ['subject', '__iexact', 'str'],
4: ['pepper_course', '__iexact', 'str'],
5: ['name', '__iexact', 'str'],
6: ['description', '__iexact', 'str'],
7: ['training_date', '__iexact', False],
8: ['training_time_start', '__iexact', 'str'],
9: ['geo_location', '__iexact', 'str'],
10: ['credits', '__iexact', 'int']
}
sorts = get_post_array(request.GET, 'col')
order = build_sorts(columns, sorts)
if not order:
order = ["-id"]
filters = get_post_array(request.GET, 'fcol')
page = int(request.GET['page'])
size = int(request.GET['size'])
start = page * size
end = start + size
if filters.get('7'):
filters['7'] = datetime.strptime(filters['7'], '%m/%d/%Y').strftime('%Y-%m-%d')
# limit to district trainings for none-system
is_no_System = False
if check_access_level(request.user, 'pepreg', 'add_new_training') != "System":
#filters[1] = request.user.profile.district.state.name
#filters[2] = request.user.profile.district.name
is_no_System = True
if len(filters):
args, kwargs = build_filters(columns, filters)
if args:
trainings = PepRegTraining.objects.prefetch_related().filter(args, **kwargs).order_by(*order)
else:
trainings = PepRegTraining.objects.prefetch_related().filter(**kwargs).order_by(*order)
else:
trainings = PepRegTraining.objects.prefetch_related().all().order_by(*order)
tmp_school_id = 0
try:
tmp_school_id = request.user.profile.school.id
except:
tmp_school_id = 0
trainings_set = list()
for item in trainings:
if(not(is_no_System)):
trainings_set.append(item)
else:
is_belong = PepRegInstructor.objects.filter(instructor=request.user,
training=item).exists() or item.user_create == request.user
if(is_belong):
trainings_set.append(item)
elif(item.district.name == request.user.profile.district.name):
try:
if(not(item.school_id) or item.school_id == -1 or item.school_id == tmp_school_id):
trainings_set.append(item)
except:
pass
count = len(trainings_set)
rows = list()
for item in trainings_set[start:end]:
arrive = "1" if datetime.now(UTC).date() >= item.training_date else "0"
allow = "1" if item.allow_registration else "0"
rl = "1" if reach_limit(item) else "0"
remain = item.max_registration - PepRegStudent.objects.filter(
training=item).count() if item.max_registration > 0 else -1
status = ""
all_edit = "0"
all_delete = "0"
if PepRegStudent.objects.filter(student=request.user, training=item).exists():
status = PepRegStudent.objects.get(student=request.user, training=item).student_status
if item.user_create == request.user:
all_edit = "1"
all_delete = "1"
else:
if PepRegInstructor.objects.filter(instructor=request.user, training=item).exists():
for pi in PepRegInstructor.objects.filter(instructor=request.user, training=item):
if pi.all_edit:
all_edit = "1";
if pi.all_delete:
all_delete = "1";
break;
is_belong = PepRegInstructor.objects.filter(instructor=request.user,
training=item).exists() or item.user_create == request.user
if check_access_level(request.user, 'pepreg', 'add_new_training') == 'System' or is_belong:
managing = "true"
else:
managing = ""
geo_location_shorter = " ".join(item.geo_location.split(",")[:3])
row = [
"",
item.district.state.name if item.district else "",
item.district.name if item.district else "",
item.subject,
item.pepper_course,
item.name,
item.description,
str('{d:%m/%d/%Y}'.format(d=item.training_date)),
str('{d:%I:%M %p}'.format(d=item.training_time_start)).lstrip('0'),
str('{d:%I:%M %p}'.format(d=item.training_time_end)).lstrip('0'),
"<span class='classroom'>%s</span><br><span class='geo_location'>%s</span><input type='hidden' value='%s'><input type='hidden' name='row_geo_location' value='%s'>" % (
item.classroom, geo_location_shorter, item.geo_props, item.geo_location),
item.credits,
"<br>".join(instructor_names(item)),
"%s %s" % (item.user_create.first_name, item.user_create.last_name),
"",
"<input type=hidden value=%s name=id> \
<input type=hidden value=%s name=managing> \
<input type=hidden value=%s name=all_edit> \
<input type=hidden value=%s name=all_delete> \
<input type=hidden value=%s,%s,%s,%s,%s,%s,%s name=status>" % (
item.id, managing, all_edit, all_delete, arrive, status, allow,
item.attendancel_id, rl, "1" if item.allow_student_attendance else "0",
remain
)
]
rows.append(row)
json_out = [count]
json_out.append(rows)
return HttpResponse(json.dumps(json_out), content_type="application/json")
def save_training(request):
try:
id = request.POST.get("id", None)
if id:
training = PepRegTraining.objects.get(id=id)
PepRegInstructor.objects.filter(training=training).delete()
else:
training = PepRegTraining()
training.date_create = datetime.now(UTC)
training.user_create = request.user
training.type = request.POST.get("type", "")
training.district_id = request.POST.get("district_id")
if(request.POST.get("school_id")):
training.school_id = request.POST.get("school_id")
training.name = request.POST.get("name", "")
training.description = request.POST.get("description", "")
if training.type == "pepper_course":
training.pepper_course = request.POST.get("pepper_course", "")
else:
training.pepper_course = ""
training.credits = request.POST.get("credits", 0)
training.attendancel_id = request.POST.get("attendancel_id", "")
training.subject = request.POST.get("subject")
training.training_date = request.POST.get("training_date", "")
training.training_time_start = request.POST.get("training_time_start", "")
training.training_time_end = request.POST.get("training_time_end", "")
training.classroom = request.POST.get("classroom", "")
training.geo_location = request.POST.get("geo_location", "")
training.geo_props = request.POST.get("geo_props", "")
training.allow_registration = request.POST.get("allow_registration", False)
training.max_registration = request.POST.get("max_registration", 0)
training.allow_attendance = request.POST.get("allow_attendance", False)
training.allow_student_attendance = request.POST.get("allow_student_attendance", False)
training.allow_validation = request.POST.get("allow_validation", False)
training.user_modify = request.user
training.date_modify = datetime.now(UTC)
training.save()
emails_get = request.POST.get("instructor_emails");
if(emails_get):
for emails in request.POST.get("instructor_emails", "").split(","):
tmp1 = emails.split("::");
email = tmp1[0];
all_edit = True if tmp1[1] == "1" else False;
all_delete = True if tmp1[2] == "1" else False;
if User.objects.filter(email=email).exists():
pi = PepRegInstructor()
pi.training = training
pi.instructor = User.objects.get(email=email)
pi.date_create = datetime.now(UTC)
pi.user_create = request.user
pi.all_edit = all_edit;
pi.all_delete = all_delete;
pi.save()
except Exception as e:
db.transaction.rollback()
return HttpResponse(json.dumps({'success': False, 'error': '%s' % e}), content_type="application/json")
return HttpResponse(json.dumps({'success': True}), content_type="application/json")
def delete_training(request):
try:
id = request.POST.get("id", None)
training = PepRegTraining.objects.get(id=id)
tid = training.id
tname = training.name
tdate = training.training_date
PepRegInstructor.objects.filter(training=training).delete()
PepRegStudent.objects.filter(training=training).delete()
TrainingUsers.objects.filter(training=training).delete()
training.delete()
ma_db = myactivitystore()
ma_db.set_item_pd(tid, tname, str(tdate))
except Exception as e:
db.transaction.rollback()
return HttpResponse(json.dumps({'success': False, 'error': '%s' % e}), content_type="application/json")
return HttpResponse(json.dumps({'success': True}), content_type="application/json")
def training_json(request):
item = PepRegTraining.objects.get(id=request.GET.get("id"))
instructor_emails = []
for pi in PepRegInstructor.objects.filter(training=item):
all_edit = "1" if pi.all_edit else "0"
all_delete = "1" if pi.all_delete else "0"
instructor_emails.append(pi.instructor.email + "::" + all_edit + "::" + all_delete)
arrive = "1" if datetime.now(UTC).date() >= item.training_date else "0"
data = {
"id": item.id,
"type": item.type,
"district_id": item.district_id,
"school_id": item.school_id,
"name": item.name,
"description": item.description,
"pepper_course": item.pepper_course,
"subject": item.subject,
"training_date": str('{d:%m/%d/%Y}'.format(d=item.training_date)),
"training_time_start": str('{d:%I:%M %p}'.format(d=item.training_time_start)).lstrip('0'),
"training_time_end": str('{d:%I:%M %p}'.format(d=item.training_time_end)).lstrip('0'),
"classroom": item.classroom,
"geo_location": item.geo_location,
"geo_props": item.geo_props,
"credits": item.credits,
"attendancel_id": item.attendancel_id,
"allow_registration": item.allow_registration,
"max_registration": '' if (item.max_registration == 0 or item.allow_registration == False) else item.max_registration,
"allow_attendance": item.allow_attendance,
"allow_validation": item.allow_validation,
"instructor_emails": instructor_emails,
"arrive": arrive
}
return HttpResponse(json.dumps(data), content_type="application/json")
def getCalendarInfo(request):
name_dict = {};
name_dict["title"] = now().strftime("%B %Y");
name_dict["year"] = now().year;
name_dict["month"] = now().month;
return HttpResponse(json.dumps(name_dict), content_type="application/json");
def getCalendarMonth(request):
SHIFT_WEEKSTART = 0;
_year = request.GET.get('year');
_month = request.GET.get('month');
_year_n = request.GET.get('year_n');
_month_n = request.GET.get('month_n');
_day = request.GET.get('day');
_day_n = request.GET.get('day_n');
_getrange = request.GET.get('daterange'); #akogan
_catype = request.GET.get('catype');
_cal_view = request.GET.get('calview')
if (_year):
_year = int(_year);
if (_month):
_month = int(_month);
if (_year_n):
_year_n = int(_year_n);
if (_month_n):
_month_n = int(_month_n);
if (_day):
_day = int(_day);
if (_day_n):
_day_n = int(_day_n);
if not _getrange:
_getrange = "0"
if not(_catype):
_catype = "0";
firstweekday = 0 + SHIFT_WEEKSTART
while firstweekday < 0:
firstweekday += 7
while firstweekday > 6:
firstweekday -= 7
start = datetime(year=_year, month=_month, day=1, tzinfo=utc) # 2016-08-01
end = datetime(year=_year, month=_month, day=1, tzinfo=utc) + relativedelta(months=1) # 2016-09-01
name_dict = {"title": start.strftime("%B %Y")};
columns = {
# 1: ['district__state__name', '__iexact', 'str'],
2: ['district__name', '__iexact', 'str']
}
filters = get_post_array(request.GET, 'fcol')
#filters[1] = request.user.profile.district.state.name
filters[2] = request.user.profile.district.name
if len(filters):
args, kwargs = build_filters(columns, filters)
if args:
all_occurrences = PepRegTraining.objects.prefetch_related().filter(args, **kwargs)
else:
all_occurrences = PepRegTraining.objects.prefetch_related().filter(**kwargs)
else:
all_occurrences = PepRegTraining.objects.prefetch_related().all()
cal = calendar.Calendar()
cal.setfirstweekday(firstweekday)
current_day = datetime(year=_year_n, month=_month_n, day=_day_n, tzinfo=utc) # 2016-08-01
try:
tmp_school_id = request.user.profile.school.id
except:
tmp_school_id = 0
daterangelist = []
#akogan datetime.
if(_getrange=="0"):
daterange = cal.itermonthdays(_year, _month)
elif(_getrange=="1" or _getrange=="3"):
weekNumber = date(year=_year, month=_month, day=_day).isocalendar()[1]
daterange = getweekdays(_year, weekNumber, _getrange)
else:
getDay = datetime(year=_year, month=_month, day=_day)
daterangelist.append(getDay)
if not daterangelist:
daterangelist = list(daterange)
userObj = request.user
request.session['user_obj'] = userObj
# akogan
if(_cal_view == 'screen'):
name_dict["table_tr_content"] = build_screen_rows(request, _year, _month, _catype, all_occurrences, current_day, tmp_school_id, daterangelist)
elif(_cal_view == 'print'):
name_dict["table_tr_content"] = build_print_rows(request, _year, _month, _catype, all_occurrences, current_day, tmp_school_id, daterangelist)
return HttpResponse(json.dumps(name_dict), content_type="application/json")
#akogan
def getweekdays(year, weekNumber, getrange):
firstday = datetime.strptime('%04d-%02d-1' % (year, weekNumber), '%Y-%W-%w')
if date(year, 1, 4).isoweekday() > 4:
firstday -= timedelta(days=7)
i = 0
while (i < 7):
yieldDay = firstday + timedelta(days=i)
if(yieldDay.isoweekday() in (6, 7) and getrange == "3"):
yield 0
else:
yield yieldDay
i += 1
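# Editor's note (illustrative, never called): for ISO week 32 of 2016 the
# generator above yields Monday 2016-08-08 through Sunday 2016-08-14; with
# getrange == "3" the weekend slots are yielded as 0 so the calendar can render
# a five-day week.
def _getweekdays_example():
    week = list(getweekdays(2016, 32, "1"))
    assert len(week) == 7
    assert week[0] == datetime(year=2016, month=8, day=8)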
def build_print_rows(request, _year, _month, _catype, all_occurrences, current_day, tmp_school_id, daterangelist):
print_row = [[]]
i = 0
array_length = len(all_occurrences)
for item in all_occurrences:
training_start_time = str('{d:%I:%M %p}'.format(d=item.training_time_start)).lstrip('0')
print_row[i].append(item.name)
print_row[i].append(item.description)
print_row[i].append(item.training_date)
print_row[i].append(training_start_time)
print_row[i].append(item.classroom)
print_row[i].append(item.geo_location)
if(i < array_length - 1):
i += 1
print_row.append([])
if(print_row):
i = 0
table_tr_content = ""
while(i < array_length):
table_tr_content += "<tr class='printview'>"
table_tr_content += "<td style='position: relative; height: 100%; width: auto; border: 1px #ccc solid;'>" + str(print_row[i][0]) + "<br/>" + str(print_row[i][1]) +"</td>"
table_tr_content += "<td style='position: relative; height: 100%; width: auto; border: 1px #ccc solid;'>" + str(print_row[i][2]) + "</td>"
table_tr_content += "<td style='position: relative; height: 100%; width: auto; border: 1px #ccc solid;'>" + str(print_row[i][3]) + "</td>"
table_tr_content += "<td style='position: relative; height: 100%; width: auto; border: 1px #ccc solid;'>" + str(print_row[i][4]) + "<br/>" + str(print_row[i][5]) + "</td>"
table_tr_content += "</tr>"
i += 1
return table_tr_content
#akogan
def build_screen_rows(request, year, month, catype, all_occurrences, current_day, tmp_school_id, daterange):
isweek = 1 if len(daterange) == 7 else 0
isday = 1 if len(daterange) == 1 else 0
rangedates = [[]]
week = 0
for day in daterange:
current = False
occurrences = []
trainingStartTime = ""
trainingEndTime = ""
trainingStartHour = ""
trainingEndHour = ""
trainingStartHours = []
trainingEndHours = []
if day:
if (isweek or isday):
date = utc.localize(day)
else:
date = datetime(year=year, month=month, day=day, tzinfo=utc)
for item in all_occurrences:
if (item.training_date == date.date()):
if (item.school_id and item.school_id != -1 and item.school_id != tmp_school_id):
continue;
arrive = "1" if datetime.now(UTC).date() >= item.training_date else "0"
allow = "1" if item.allow_registration else "0"
r_l = "1" if reach_limit(item) else "0"
allow_student_attendance = "1" if item.allow_student_attendance else "0"
attendancel_id = item.attendancel_id
status = ""
try:
userObj = request.session.get('user_obj', None)
if PepRegStudent.objects.filter(student=userObj, training=item).exists():
status = PepRegStudent.objects.get(student=userObj, training=item).student_status
except:
status = ""
trainingStartTime = str('{d:%I:%M %p}'.format(d=item.training_time_start)).lstrip('0')
trainingEndTime = str('{d:%I:%M %p}'.format(d=item.training_time_end)).lstrip('0')
itemData = ""
if isday:
trainingStartMinutes = int(trainingStartTime[-5:-3])
if(trainingStartMinutes)<30:
trainingStartHour = trainingStartTime[0:-5] + "00" + trainingStartTime[-3:]
else:
trainingStartHour = trainingStartTime[0:-5] + "30" + trainingStartTime[-3:]
trainingEndMinutes = int(trainingEndTime[-5:-3])
if (trainingEndMinutes) < 30:
trainingEndHour = trainingEndTime[0:-5] + "00" + trainingEndTime[-3:]
else:
trainingEndHour = trainingEndTime[0:-5] + "30" + trainingEndTime[-3:]
trainingStartHours.append(trainingStartHour)
trainingEndHours.append(trainingEndHour)
itemData = "<br/><div>From: " + trainingStartTime + "<br/>\nTo: " + trainingEndTime
#
titlex = item.name + "::" + trainingStartTime + "::" + trainingEndTime
if item.classroom:
titlex = titlex + "::" + item.classroom
if isday: itemData += "<br/>\nClassroom: " + item.classroom
if item.geo_location:
titlex = titlex + "::" + item.geo_location
if isday: itemData += "<br/>\nLocation: " + item.geo_location
if isday: itemData += "</div>"
if (arrive == "0" and allow == "0"):
if (catype == "0" or catype == "4"):
occurrences.append("<span class='alert al_4' titlex='" + titlex + "'>" + item.name + "</span>"+itemData);
elif (arrive == "0" and allow == "1"):
if (status == "" and r_l == "1"):
if (catype == "0" or catype == "5"):
occurrences.append("<span class='alert al_7' titlex='" + titlex + "'>" + item.name + "</span>"+itemData);
else:
if (status == "Registered"):
# checked true
if (catype == "0" or catype == "3"):
tmp_ch = "<input type = 'checkbox' class ='calendar_check_would' training_id='" + str(item.id) + "' checked /> ";
occurrences.append("<label class='alert al_6' titlex='" + titlex + "'>" + tmp_ch + "<span>" + item.name + "</span>"+itemData+"</label>");
else:
# checked false
if (catype == "0" or catype == "2"):
tmp_ch = "<input type = 'checkbox' class ='calendar_check_would' training_id='" + str(item.id) + "' /> ";
occurrences.append("<label class='alert al_5' titlex='" + titlex + "'>" + tmp_ch + "<span>" + item.name + "</span>"+itemData+"</label>");
elif (arrive == "1" and status == "" and allow == "1"):
# The registration date has passed for this training
pass
elif (arrive == "1" and allow_student_attendance == "0"):
# Instructor records attendance.
pass
elif (arrive == "1" and allow_student_attendance == "1"):
if (status == "Attended" or status == "Validated"):
# checked true
if (catype == "0" or catype == "1"):
tmp_ch = "<input type = 'checkbox' class ='calendar_check_attended' training_id='" + str(item.id) + "' attendancel_id='" + attendancel_id + "' checked /> ";
occurrences.append("<label class='alert al_3' titlex='" + titlex + "'>" + tmp_ch + "<span>" + item.name + "</span>"+itemData+"</label>");
else:
# checked false
if (catype == "0" or catype == "3"):
tmp_ch = "<input type = 'checkbox' class ='calendar_check_attended' training_id='" + str(item.id) + "' attendancel_id='" + attendancel_id + "' /> ";
occurrences.append("<label class='alert al_6' titlex='" + titlex + "'>" + tmp_ch + "<span>" + item.name + "</span>"+itemData+"</label>");
if date.__str__() == current_day.__str__():
current = True
rangedates[week].append([day, occurrences, current, trainingStartHours, trainingEndHours])
if (not isweek and not isday):
if len(rangedates[week]) == 7:
rangedates.append([])
week += 1
table_tr_content = ""
if isweek:
colstyle = "style='min-height: 355px !important;'"
elif isday:
colstyle = "style='min-height: 590px !important;'"
else:
colstyle = "style='min-height: 60px;'"
if isday:
dayHours = []
for p in range(2):
for i in range(0, 13):
if((p == 0 and i < 6) or i == 0): continue
if (p == 0 and i < 12):
d = "AM"
elif (p == 1 and i < 12) or (p == 0 and i == 12):
d = "PM"
getHour = str(i) if i > 0 else "12"
if ((p == 0) or i < 6): getHalfHour = getHour + ":30 " + d
getHour += ":00 " + d
dayHours.append(getHour)
if ((p == 0) or i < 6): dayHours.append(getHalfHour)
if (p == 1 and i == 6): break
weekLen = len(rangedates) - 2
for weekNum, week in enumerate(rangedates):
if((not isweek and not isday) and weekNum == weekLen):
addBorder = "border-bottom: 1px #ccc solid;"
else:
addBorder = ""
table_tr_content += "<tr class='calendar-tr-tmp'>"
if isday:
table_tr_content += "<td style='position: relative; height: 100%; width: -moz-calc(2.5%) !important; width: -webkit-calc(2.5%) !important; width: calc(2.5%) !important;'>" \
"<div style='display: flex; flex-direction: column; justify-content: space-between; position: absolute; top:0px; bottom:0px; left:0px; width: 100%;'>";
for dayHour in dayHours:
table_tr_content += "<div style='display: block; width: 100%; box-sizing: border-box; height: 27px; padding: 5px; border-bottom: 1px solid #ccc; text-align: right; padding-right: 50px;'>" + dayHour + "</div>"
table_tr_content += "</div></td>";
for day in week:
if(isweek or isday):
if day[0] != 0: day[0]=day[0].day
class_name = "";
cell_border = "border-right: 1px solid #ccc;border-bottom: 1px solid #ccc;"
if (day[0] == 0):
class_name = "calendarium-empty";
cell_border = ""
elif (day[2]):
class_name = "calendarium-current";
else:
class_name = "calendarium-day";
if(not isday and day[0]):
if(isweek and week[0][0] > day[0]):
nextMonth = "true"
else:
nextMonth = "false"
if(type(week[6][0]) is not datetime):
dateToCompare = week[6][0]
else:
dateToCompare = week[6][0].day
if (isweek and dateToCompare < day[0]):
prevMonth = "true"
else:
prevMonth = "false"
clickFunc = " onclick='pickDayOnClick(event, " + str(day[0]) + ", " + nextMonth + "," + prevMonth + "," + str(dateToCompare) + ")'"
else:
clickFunc = ""
if(not (day[0] == 0 and isweek)):
table_tr_content += "<td class='" + class_name + "' style='position: relative; height: 100%;"+cell_border+"'" + clickFunc +">"
if (day[0]):
table_tr_content += "<div class='calendarium-relative' "+ colstyle +"><span class='calendarium-date'>" + str(day[0]) + "</span>";
if not isday:
for tmp1 in day[1]:
table_tr_content += tmp1;
if isday:
table_tr_content += "<div style='display: flex; flex-direction: column; justify-content: space-between; position: absolute; top:0px; bottom:0px; left:0px; width: 100%;'>";
for dayHour in dayHours:
divAdded = 0
if day[1]:
i = 0
table_tr_content += "<div class='training-row' style='display: block; width: 100%; box-sizing: border-box; padding: 0px; padding-left: 5px; border-bottom: 1px solid #ccc; height: 24px !important; text-align: right;' id='" + dayHour + "'> "
divAdded = 1
for tmp1 in day[1]:
if(day[4][i] != "" and (day[3][i] != day[4][i])):
h = 0
endHour = 0
startHour = int(day[3][i][:day[3][i].index(":")])
startHourAMPM = day[3][i][-2:]
startHour = startHour if(startHourAMPM == "AM" and startHour >= 6) else 6
if((startHourAMPM == "PM" and (startHour == 12 or startHour <= 6)) or (startHourAMPM == "AM" and startHour >= 6)):
endHour = int(day[4][i][:day[4][i].index(":")])
endHourAMPM = day[4][i][-2:]
h = startHour
hourAMPM = "AM"
if(startHourAMPM != endHourAMPM):
endHourLast = endHour if(endHour == 12 or endHour <= 6) else 6
endHour = 12
else:
endHour = endHour if(endHourAMPM == "AM" or endHour == 12 or endHour <= 6) else 6
endHourLast = endHour
while(h <= endHour):
fullHour = str(h) + ":00 " + hourAMPM
midHour = str(h) + ":30 " + hourAMPM
firstHalfHour = int(day[3][i][day[3][i].index(":")+1:day[3][i].index(" ")]) < 30
if ((fullHour == dayHour and firstHalfHour) or (midHour == dayHour and not firstHalfHour)): break
h += 1
if(h == endHour and endHour != endHourLast):
h = 1
endHour = endHourLast
hourAMPM = "PM"
if (h <= endHour):
t = day[3][i][-2:]
dh = day[3][i][:day[3][i].index(":")] if len(day[3][i][:day[3][i].index(":")]) == 2 else "0" + day[3][i][:day[3][i].index(":")]
table_tr_content += "<span class='" + t + " " + dh + " span-" + str(i) + "'>" + tmp1 + "</span>"
i += 1
if ( not divAdded ):
table_tr_content += "<div class='training-row' style='display: block; width: 100%; box-sizing: border-box; padding: 5px; border-bottom: 1px solid #ccc; height: 26px !important; text-align: right;' id='" + dayHour + "'> "
table_tr_content += "</div>"
table_tr_content += "</div>"
table_tr_content += "</div>";
table_tr_content += "</td>";
table_tr_content += "</tr>";
return table_tr_content;
def remove_student(student):
if student.training.type == "pepper_course":
CourseEnrollment.unenroll(student.student, student.training.pepper_course)
CourseEnrollmentAllowed.objects.filter(email=student.student.email,
course_id=student.training.pepper_course).delete()
student.delete()
def register(request):
try:
join = request.POST.get("join", "false") == "true"
training_id = request.POST.get("training_id")
user_id = request.POST.get("user_id")
training = PepRegTraining.objects.get(id=training_id)
if user_id:
student_user = User.objects.get(id=int(user_id))
else:
student_user = request.user
if join:
if reach_limit(training):
raise Exception("Maximum number of users have registered for this training.")
try:
student = PepRegStudent.objects.get(training_id=training_id, student=student_user)
except:
student = PepRegStudent()
student.user_create = request.user
student.date_create = datetime.now(UTC)
student.student = student_user
student.student_status = "Registered"
student.training_id = int(training_id)
student.user_modify = request.user
student.date_modify = datetime.now(UTC)
student.save()
ma_db = myactivitystore()
my_activity = {"GroupType": "PDPlanner", "EventType": "PDTraining_registration", "ActivityDateTime": datetime.utcnow(), "UsrCre": request.user.id,
"URLValues": {"training_id": training.id},
"TokenValues": {"training_id": training.id},
"LogoValues": {"training_id": training.id}}
ma_db.insert_item(my_activity)
if training.type == "pepper_course":
cea, created = CourseEnrollmentAllowed.objects.get_or_create(email=student_user.email,
course_id=training.pepper_course)
cea.is_active = True
cea.save()
CourseEnrollment.enroll(student_user, training.pepper_course)
#akogan
mem = TrainingUsers.objects.filter(user=student_user, training=training)
if not mem.exists():
tu = TrainingUsers(user=student_user, training=training)
tu.save()
else:
student = PepRegStudent.objects.get(training_id=training_id, student=student_user)
remove_student(student)
#akogan
mem = TrainingUsers.objects.filter(user=student_user, training=training)
if mem.exists():
mem.delete()
except Exception as e:
return HttpResponse(json.dumps({'success': False, 'error': '%s' % e}), content_type="application/json")
return HttpResponse(json.dumps({'success': True, 'training_id': training_id}), content_type="application/json")
def set_student_attended(request):
try:
training_id = int(request.POST.get("training_id"))
student_id = int(request.POST.get("student_id"))
yn = request.POST.get("yn", False)
training = PepRegTraining.objects.get(id=training_id)
try:
student = PepRegStudent.objects.get(training_id=training_id, student_id=student_id)
except:
student = PepRegStudent()
student.user_create = request.user
student.date_create = datetime.now(UTC)
student.training = training
student.student = request.user
student.user_modify = request.user
student.date_modify = datetime.now(UTC)
if yn == "true":
student.student_status = "Attended"
if not training.allow_validation:
student.student_credit = training.credits
student.save()
else:
if training.allow_registration:
student.student_status = "Registered"
student.student_credit = 0
student.save()
else:
student.delete()
student = None
if student:
data = {"id": student.id,
"email": student.student.email,
"status": student.student_status,
"is_attended": student.student_status == "Validated" or student.student_status == "Attended",
"is_validated": student.student_status == "Validated",
"student_credit": student.student_credit,
"student_id": student.student_id,
}
else:
data = None
except Exception as e:
db.transaction.rollback()
return HttpResponse(json.dumps({'success': False, 'error': '%s' % e}), content_type="application/json")
return HttpResponse(json.dumps({'success': True, 'data': data}), content_type="application/json")
def set_student_validated(request):
try:
training_id = int(request.POST.get("training_id"))
student_id = int(request.POST.get("student_id"))
yn = request.POST.get("yn", False)
training = PepRegTraining.objects.get(id=training_id)
student = PepRegStudent.objects.get(training_id=training_id, student_id=student_id)
student.user_modify = request.user
student.date_modify = datetime.now(UTC)
if yn == "true":
student.student_status = "Validated"
student.student_credit = training.credits
student.save()
else:
student.student_status = "Attended"
student.student_credit = 0
student.save()
data = {"id": student.id,
"email": student.student.email,
"status": student.student_status,
"is_attended": student.student_status == "Validated" or student.student_status == "Attended",
"is_validated": student.student_status == "Validated",
"student_credit": student.student_credit,
"student_id": student.student_id,
}
except Exception as e:
return HttpResponse(json.dumps({'success': False, 'error': '%s' % e}), content_type="application/json")
return HttpResponse(json.dumps({'success': True, 'data': data}), content_type="application/json")
def student_list(request):
print "student_list"
logging.warning('student_list')
try:
training_id = request.POST.get("training_id")
print training_id
training = PepRegTraining.objects.get(id=training_id)
students = PepRegStudent.objects.filter(training_id=training_id)
arrive = datetime.now(UTC).date() >= training.training_date
student_limit = reach_limit(training) # akogan
rows = []
for item in students:
rows.append({
"id": item.id,
"email": item.student.email,
"status": item.student_status,
"is_attended": item.student_status == "Validated" or item.student_status == "Attended",
"is_validated": item.student_status == "Validated",
"student_credit": item.student_credit,
"student_id": item.student_id,
})
except Exception as e:
return HttpResponse(json.dumps({'success': False, 'error': '%s' % e}), content_type="application/json")
last_date = training.last_date
if last_date:
last_date = str('{d:%m/%d/%Y}'.format(d=training.last_date));
return HttpResponse(json.dumps({'success': True,
'rows': rows,
'allow_attendance': training.allow_attendance,
'allow_validation': training.allow_validation,
'allow_registration': training.allow_registration,
'training_id': training.id,
'training_name': training.name,
'last_date': last_date,
'training_type': training.type,
'training_date': str('{d:%m/%d/%Y}'.format(d=training.training_date)),
'arrive': arrive,
'student_limit': student_limit # akogan
}),
content_type="application/json")
def get_courses_drop(state_name, district_code):
matches_state = [None, "ALL", district_code]
matches_district = [None, "ALL", state_name]
courses = modulestore().collection.find({'_id.category': 'course', 'metadata.display_state': state_name})
if courses.count() > 0:
matches_state.append('ALL')
courses = modulestore().collection.find({'_id.category': 'course', 'metadata.display_district': district_code})
if courses.count() > 0:
matches_district.append('ALL')
flt = {
'_id.category': 'course',
'metadata.display_state': {'$in': matches_district},
'metadata.display_district': {'$in': matches_state}
}
courses = modulestore().collection.find(flt).sort("metadata.display_name", pymongo.ASCENDING)
courses = modulestore()._load_items(list(courses), 0)
return courses
def show_map(request):
training_id = request.GET.get("training_id")
district_id = request.GET.get("district_id")
if (district_id):
r = list()
district = District.objects.get(id=district_id)
if district:
data = School.objects.filter(district=district).order_by('name')
for item in data:
r.append({'id': item.id, 'name': item.name, 'code': item.code});
return HttpResponse(json.dumps(r), content_type="application/json")
else:
training = PepRegTraining.objects.get(id=training_id)
return render_to_response('administration/pepreg_map.html', {"training": training})
def delete_student(request):
try:
id = int(request.POST.get("id"))
user = PepRegStudent.objects.get(id=id).student
remove_student(PepRegStudent.objects.get(id=id))
TrainingUsers.objects.filter(user=user).delete()
except Exception as e:
db.transaction.rollback()
return HttpResponse(json.dumps({'success': False, 'error': '%s' % e}), content_type="application/json")
return HttpResponse(json.dumps({'success': True}), content_type="application/json")
def download_students_excel(request):
training_id = request.GET.get("training_id")
last_date = request.GET.get("last_date")
flag_pdf = request.GET.get("pdf")
training = PepRegTraining.objects.get(id=training_id)
if(last_date):
name_dict = {};
_res = "0";
try:
EMAIL_TEMPLATE_DICT = {'training_email': ('emails/training_student_email_subject.txt', 'emails/training_student_email_message.txt')}
subject_template, message_template = EMAIL_TEMPLATE_DICT.get("training_email", (None, None))
email_students = [];
for reg_stu in PepRegStudent.objects.filter(training_id=training_id, student_status="Registered"):
userx = User.objects.get(id=reg_stu.student_id)
email_students.append(userx.email);
param_dict = {};
param_dict["training_name"] = training.name;
param_dict["training_date"] = str('{d:%m-%d-%Y}'.format(d=training.training_date));
param_dict["first_name"] = userx.first_name;
param_dict["last_name"] = userx.last_name;
param_dict["district_name"] = training.district.name;
param_dict["training_time_start"] = str('{d:%I:%M %p}'.format(d=training.training_time_start)).lstrip('0');
if training.classroom == "" and training.geo_location == "":
param_dict["classroom"] = "";
param_dict["geo_location"] = "";
elif not training.classroom == "" and training.geo_location == "":
param_dict["classroom"] = training.classroom;
param_dict["geo_location"] = "";
elif training.classroom == "" and not training.geo_location == "":
param_dict["classroom"] = "";
param_dict["geo_location"] = training.geo_location;
else:
param_dict["classroom"] = training.classroom + ", ";
param_dict["geo_location"] = training.geo_location;
subject = render_to_string(subject_template, param_dict)
message = render_to_string(message_template, param_dict)
# _res = send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [userx.email], fail_silently=False)
msg = EmailMultiAlternatives(subject, message, settings.DEFAULT_FROM_EMAIL, [userx.email])
msg.content_subtype = "html"
msg.send()
training.last_date = last_date;
training.save()
_res = "1";
except Exception as e:
_res = '%s' % e
name_dict["_res"] = _res;
return HttpResponse(json.dumps(name_dict), content_type="application/json");
elif(flag_pdf):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="' + training.name + flag_pdf + '.pdf"'
buffer = BytesIO()
c = canvas.Canvas(buffer, pagesize=A4)
# ------------------------------------------------------------------------------------logo
try:
logo = ImageReader("https://" + request.get_host() + '/static/images/pd_pdf2.png')
except:
logo = ImageReader("http://" + request.get_host() + '/static/images/pd_pdf2.png')
c.drawImage(logo, 330, 750, 200, 73);
c.setFont("Helvetica", 20)
c.drawString(370, 710, "PD Planner")
c.drawString(370, 680, "SignUp")
styleSheet = getSampleStyleSheet()
style = styleSheet['BodyText']
style.fontName = "Helvetica"
style.fontSize = 16
style.leading = 15
training_name = "Training Name: " + training.name
content_width = stringWidth(training_name, "Helvetica", 16)
p = Paragraph(training_name, style)
w1 = 520
h1 = 800
w2, h2 = p.wrap(w1, h1)
p.drawOn(c, 50, 625)
c.setFont("Helvetica", 16)
c.drawString(50, 600, "Training Date: " + str('{d:%m/%d/%Y}'.format(d = training.training_date)))
# c.drawString(50, 578, "Instructor:")
instructor_y = 575
tmp_flag = 0;
instructor_name = "Instructor: ";
for reg_stu in PepRegInstructor.objects.filter(training_id=training_id):
if tmp_flag == 0:
tmp_flag += 1;
instructor_name += reg_stu.instructor.first_name + " " + reg_stu.instructor.last_name;
else:
instructor_name += ", " + reg_stu.instructor.first_name + " " + reg_stu.instructor.last_name;
style1 = styleSheet['BodyText']
style1.fontName = "Helvetica"
style1.fontSize = 16
style1.leading = 15
p1 = Paragraph(instructor_name, style1)
w2 = 520
h2 = 800
w2, h2 = p1.wrap(w2, h2)
if (h2 == 15):
p1.drawOn(c, 50, instructor_y + 3)
elif (h2 == 30):
p1.drawOn(c, 50, instructor_y - 13)
elif (h2 == 45):
p1.drawOn(c, 50, instructor_y - 23)
# ------------------------------------------------------------------------------------head
c.setFillColor(colors.lawngreen) # C7,F4,65
base_table_y = 510;
c.rect(10, base_table_y, 80, 30, fill=1)
c.rect(90, base_table_y, 80, 30, fill=1)
c.rect(170, base_table_y, 90, 30, fill=1)
c.rect(260, base_table_y, 90, 30, fill=1)
c.rect(350, base_table_y, 70, 30, fill=1)
c.rect(420, base_table_y, 160, 30, fill=1)
c.setStrokeColor(colors.black)
c.setFillColor(colors.black) # C7,F4,65
c.setFont("Helvetica", 10)
c.drawCentredString(50, base_table_y + 10, "First Name")
c.drawCentredString(130, base_table_y + 10, "Last Name")
c.drawCentredString(215, base_table_y + 10, "Email Address")
c.drawCentredString(305, base_table_y + 10, "School Site")
c.drawCentredString(385, base_table_y + 10, "Employee ID")
c.drawCentredString(505, base_table_y + 10, "Signature")
# ------------------------------------------------------------------------------------tr
base_font_size = 8;
ty = base_table_y;
student_index = 0;
studentList = PepRegStudent.objects.filter(training_id=training_id);
lastpos = len(studentList) - 1;
table_style = styleSheet['BodyText']
table_style.fontName = "Helvetica"
table_style.fontSize = base_font_size
table_style.leading = 10
c.setFont("Helvetica", base_font_size)
for reg_stu in studentList:
tr_height = 30
pro = UserProfile.objects.get(user_id=reg_stu.student.id)
if (pro):
tmp_name = "";
try:
tmp_name = pro.school.name
except:
tmp_name = ""
if (tmp_name.find("Elementary") > -1):
tmp_name = tmp_name.split("Elementary")[0];
elif (tmp_name.find("Middle") > -1):
tmp_name = tmp_name.split("Middle")[0];
elif (tmp_name.find("High") > -1):
tmp_name = tmp_name.split("High")[0];
tmp_email_width = stringWidth(tmp_name, "Helvetica", base_font_size)
if (tmp_email_width > 80):
p = Paragraph(tmp_name, table_style)
w2, h2 = p.wrap(80, 100)
h2 += 10
if (h2 > tr_height):
tr_height = h2
p.drawOn(c, 265, ty - tr_height + 5)
else:
c.drawCentredString(305, ty - 15, tmp_name)
ty -= tr_height;
c.rect(10, ty, 80, tr_height, fill=0)
c.rect(90, ty, 80, tr_height, fill=0)
c.rect(170, ty, 90, tr_height, fill=0)
c.rect(260, ty, 90, tr_height, fill=0)
c.rect(350, ty, 70, tr_height, fill=0)
c.rect(420, ty, 160, tr_height, fill=0)
if (reg_stu.student.first_name):
tmp_email_width = stringWidth(reg_stu.student.first_name, "Helvetica", base_font_size)
if (tmp_email_width > 75):
frist_tmp1 = int(len(reg_stu.student.first_name) / 3)
while 1:
frist_tmp2 = stringWidth(reg_stu.student.first_name[0: frist_tmp1], "Helvetica", base_font_size)
if(frist_tmp2 > 70):
break;
else:
frist_tmp1 += 1
c.drawString(13, ty + tr_height - 13, reg_stu.student.first_name[0: frist_tmp1])
c.drawString(13, ty + tr_height - 23, reg_stu.student.first_name[frist_tmp1:])
else:
c.drawCentredString(50, ty + tr_height - 15, reg_stu.student.first_name)
if (reg_stu.student.last_name):
tmp_email_width = stringWidth(reg_stu.student.last_name, "Helvetica", base_font_size)
if (tmp_email_width > 75):
frist_tmp1 = int(len(reg_stu.student.last_name) / 3)
while 1:
frist_tmp2 = stringWidth(reg_stu.student.last_name[0: frist_tmp1], "Helvetica", base_font_size)
if (frist_tmp2 > 70):
break;
else:
frist_tmp1 += 2
c.drawString(93, ty + tr_height - 13, reg_stu.student.last_name[0: frist_tmp1])
c.drawString(93, ty + tr_height - 23, reg_stu.student.last_name[frist_tmp1:])
else:
c.drawCentredString(130, ty + tr_height - 15, reg_stu.student.last_name)
if (reg_stu.student.email):
tmp_email_width = stringWidth(reg_stu.student.email, "Helvetica", base_font_size)
if (tmp_email_width > 80):
frist_tmp1 = len(reg_stu.student.email) / 2
while 1:
frist_tmp2 = stringWidth(reg_stu.student.email[0: frist_tmp1], "Helvetica", base_font_size)
if (frist_tmp2 > 80):
break;
else:
frist_tmp1 += 2
c.drawString(173, ty + tr_height - 13, reg_stu.student.email[0: frist_tmp1])
c.drawString(173, ty + tr_height - 23, reg_stu.student.email[frist_tmp1:])
else:
c.drawCentredString(215, ty + tr_height - 15, reg_stu.student.email)
if student_index == lastpos:
c.showPage()
else:
if (ty < 60):
ty = 790;
pdf_first_flag = False;
c.showPage()
c.setStrokeColor(colors.black)
c.setFillColor(colors.black) # C7,F4,65s
c.setFont("Helvetica", base_font_size)
student_index += 1;
c.save()
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
else:
students = PepRegStudent.objects.filter(training_id=training_id)
output = StringIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})
worksheet = workbook.add_worksheet()
FIELDS = ["email", "status", "attendance", "validation", "credits"]
TITLES = ["User", "Status", "Attendance", "Validation", "Credits"]
for i, k in enumerate(TITLES):
worksheet.write(0, i, k)
row = 1
for item in students:
if training.allow_attendance:
attendance = "Y" if (item.student_status == "Validated" or item.student_status == "Attended") else "N"
else:
attendance = ""
if training.allow_validation:
validation = "Y" if (item.student_status == "Validated") else "N"
else:
validation = ""
data_row = {'email': item.student.email,
'status': item.student_status,
'attendance': attendance,
'validation': validation,
'credits': item.student_credit
}
for i, k in enumerate(FIELDS):
worksheet.write(row, i, data_row[k])
row = row + 1
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s_users.xlsx' % (training.name)
workbook.close()
response.write(output.getvalue())
return response
def download_students_pdf(request):
training_id = request.GET.get("training_id")
training = PepRegTraining.objects.get(id=training_id)
students = PepRegStudent.objects.filter(training_id=training_id)
output = StringIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})
worksheet = workbook.add_worksheet()
FIELDS = ["email", "status", "attendance", "validation", "credits"]
TITLES = ["User", "Status", "Attendance", "Validation", "Credits"]
for i, k in enumerate(TITLES):
worksheet.write(0, i, k)
row = 1
for item in students:
if training.allow_attendance:
attendance = "Y" if (item.student_status == "Validated" or item.student_status == "Attended") else "N"
else:
attendance = ""
if training.allow_validation:
validation = "Y" if (item.student_status == "Validated") else "N"
else:
validation = ""
data_row = {'email': item.student.email,
'status': item.student_status,
'attendance': attendance,
'validation': validation,
'credits': item.student_credit
}
for i, k in enumerate(FIELDS):
worksheet.write(row, i, data_row[k])
row = row + 1
response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s_users.xlsx' % (training.name)
workbook.close()
response.write(output.getvalue())
return response
| agpl-3.0 | -2,855,808,094,077,198,000 | 40.972465 | 276 | 0.534442 | false |
CNAMmaster/Tweeples | tool_collect_tweets.py | 1 | 5792 | #!/usr/bin/env python
# Sample script that collects tweets matching a string.
#
# This file is part of the Tweeples collection of scripts.
#
# Tweeples is free software: you can redistribute it and/or modify it
# under the terms of the BSD license. For the full terms of the license
# see the file `COPYING' in this directory.
'''Collect tweets matching a text pattern and store them
continuously in JSON-formatted lines of a local file.'''
__author__ = 'Giorgos Keramidas <[email protected]>'
import argparse
import errno
import json
import os
import sys
import twitter
from twitter.api import TwitterHTTPError
from util import error, message, warning
def search(text, max_pages=10, results_per_page=100):
"""Generator for searching 'text' in Twitter content
Search the public Twitter timeline for tweets matching a 'text' string,
which can also be a hash tag, and yield a batch of matched tweets every
time we have some results.
Args:
text str, the text to search for in Twitter. This can
be a plain text string or a '#hashtag' to look
for tweets of this topic only.
max_pages int, maximum number of result 'pages' to obtain
from Twitter's backlog of archived tweets. When
not specified, default to 10 pages.
results_per_page int, maximum number of results per page to fetch
from Twitter's backlog of archived tweets. When
not specified, default to 100 tweets per page.
    Yields:
    A list of dicts per results page. Every dict in a yielded list is a 'result'
    from twitter.Twitter.search and represents a single tweet.
"""
while True:
t = twitter.Twitter(domain="search.twitter.com")
for page in range(1, max_pages + 1):
yield t.search(q=text, rpp=results_per_page, page=page)['results']
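# Illustrative usage sketch (editor's addition, not in the original script): each item
# yielded by search() is a list of tweet dicts for one results page, so a caller
# typically loops over batches and then over tweets, e.g.
#
#   for batch in search('#python', max_pages=2, results_per_page=50):
#       for tweet in batch:
#           print tweet['id'], tweet['from_user'], tweet['text']
#
# Because of the outer `while True`, the generator never terminates on its own;
# streamsearch() below simply keeps consuming it until interrupted.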
def preload_tweets(filename):
"""Preload previously seen tweets from a text file.
Args:
filename str, Name of the file where we preload tweets from.
Returns:
A set() containing all the numeric 'id' attributes of tweets we have
already seen.
"""
if not filename:
return set()
try:
seen = set()
for id in (tweet['id'] for tweet in
(json.loads(line) for line in
file(filename, 'r').readlines())):
seen.add(id)
except Exception, e:
seen = set() # Avoid returning partial results on error
return seen
def streamsearch(ofile, text, max_pages=10, results_per_page=100):
"""Stream the results of searching for 'text' to the 'ofile' output file
Args:
ofile str, the name of a file where we will write any tweets
we find. Tweets are written in JSON format, with every
tweet being stored in a separate line as a Python dict.
text str, the text to search for in Twitter. This can
be a plain text string or a '#hashtag' to look
for tweets of this topic only.
max_pages int, maximum number of result 'pages' to obtain
from Twitter's backlog of archived tweets. When
not specified, default to 10 pages.
results_per_page int, maximum number of results per page to fetch
from Twitter's backlog of archived tweets. When
not specified, default to 100 tweets per page.
Returns:
None
"""
    # Load the ids of already seen tweets, if there are any.
ofilename = ofile or 'standard output'
seen = ofile and preload_tweets(ofile) or set()
if seen:
message('%d tweets preloaded from %s', len(seen), ofilename)
try:
ostream = ofile and file(ofile, 'a+') or sys.stdout
for matches in search(text, max_pages=max_pages,
results_per_page=results_per_page):
newmatches = 0
for tweet in matches:
(tid, tuser, text) = (tweet['id'], tweet['from_user'],
tweet['text'])
if not tid in seen:
newmatches += 1
seen.add(tid)
print >> ostream, json.dumps(tweet)
if newmatches > 0:
message('%d new tweets logged at %s', newmatches, ofilename)
ostream.close()
except IOError, e:
if ostream and ostream != sys.stdout:
ostream.close()
        warning('Error writing to file "%s". %s', ofilename, e)
if __name__ == '__main__':
json_filename = None # Where to store matching tweets
lookup_text = None # Text to search for
# Parse command-line args for output file name.
parser = argparse.ArgumentParser(description=(
'Collect tweets matching a text pattern and store them'
'continuously in JSON-formatted lines of a local file.'))
parser.add_argument('-o', '--output', metavar='FILE', type=str,
default=None, help='output file name')
parser.add_argument('TEXT', nargs='+', type=str, default=None,
help='text to search for in tweet content')
args = parser.parse_args()
json_filename = args.output # Where to store matching tweets
lookup_text = ' '.join(args.TEXT) # Text to search for
# Keep searching for tweets, until manually interrupted.
while True:
try:
streamsearch(json_filename, lookup_text)
except TwitterHTTPError, e:
warning('Skipping HTTP error %s [...]', str(e).split('\n')[0])
pass
| bsd-2-clause | 1,034,474,566,967,908,600 | 39.222222 | 79 | 0.597203 | false |
EuropeanSocialInnovationDatabase/ESID-main | ESIDcrawlers/ESIDcrawlers/spiders/GeneralWebsite.py | 1 | 9315 |
from HTMLParser import HTMLParser
import MySQLdb
from scrapy.linkextractors import LinkExtractor
from lxml.html.clean import Cleaner
from lxml.html.soupparser import fromstring
from lxml.etree import tostring
import re
import BeautifulSoup
from scrapy.spiders import Rule, CrawlSpider
from database_access import *
import requests
import json
from urlparse import urlparse, urljoin
from pymongo import MongoClient
class SIScrapedItem():
RelatedTo = ""
Name = ""
DatabaseID = ""
Date = ""
URL = ""
Content = ""
Text = ""
PageTitle = ""
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ' '.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
class GeneralWebsitespider(CrawlSpider):
name = "GeneralWebsite"
allowed_domains = ["fablab.hochschule-rhein-waal.de"]
start_urls = []
def __init__(self, crawl_pages=True, moreparams=None, *args, **kwargs):
super(GeneralWebsitespider, self).__init__(*args, **kwargs)
# Set the class member from here
if (crawl_pages is True):
# Then recompile the Rules
self.allowed_domains = self.get_allowed()
self.start_urls = self.get_starting_urls()
regs = []
for dom in self.start_urls:
regs.append(dom+".*")
GeneralWebsitespider.rules = (Rule(LinkExtractor(allow=regs), callback="parse_start_url", follow=True),)
super(GeneralWebsitespider, self)._compile_rules()
self.moreparams = moreparams
def get_allowed(self):
allowed_domains = []
pattern = "[a-zA-Z0-9]*[\.]{0,1}[a-zA-Z0-9]+[\.][a-zA-Z0-9]{0,4}"
self.db = MySQLdb.connect(host, username, password, database, charset='utf8')
self.cursor = self.db.cursor()
sql = "Select idActors,ActorName,ActorWebsite from Actors where DataSources_idDataSources>16"
self.cursor.execute(sql)
results = self.cursor.fetchall()
for res in results:
ArtWeb = res[2]
if ArtWeb== None or "vk.com" in ArtWeb.lower() or "youtube" in ArtWeb.lower() or "twitter" in ArtWeb.lower() or "linkedin" in ArtWeb.lower() \
or "vimeo" in ArtWeb.lower() or "instagram" in ArtWeb.lower() or "plus.google" in ArtWeb.lower() or "facebook.com" in ArtWeb.lower() \
or "pinterest" in ArtWeb.lower() or "meetup" in ArtWeb.lower() or "wikipedia" in ArtWeb.lower():
continue
parsed_uri = urlparse(ArtWeb)
domain = '{uri.netloc}/'.format(uri=parsed_uri).replace("/","").replace("www.","")
prog = re.compile(pattern)
result = prog.match(domain)
if result == None:
continue
allowed_domains.append(domain)
sql = "Select idProjects,ProjectName,ProjectWebpage from Projects where DataSources_idDataSources>16"
self.cursor.execute(sql)
results = self.cursor.fetchall()
for res in results:
ArtWeb = res[2]
if ArtWeb == None or "vk.com" in ArtWeb.lower() or "youtube" in ArtWeb.lower() or "twitter" in ArtWeb.lower() or "linkedin" in ArtWeb.lower() \
or "vimeo" in ArtWeb.lower() or "instagram" in ArtWeb.lower() or "plus.google" in ArtWeb.lower() or "facebook.com" in ArtWeb.lower()\
or "pinterest" in ArtWeb.lower() or "meetup" in ArtWeb.lower()or "wikipedia" in ArtWeb.lower():
continue
parsed_uri = urlparse(ArtWeb)
try:
domain = '{uri.netloc}/'.format(uri=parsed_uri).replace("/", "").replace("www.", "")
prog = re.compile(pattern)
result = prog.match(domain)
if result == None:
continue
allowed_domains.append(domain)
except:
print "Domain with special character: "+ArtWeb
return allowed_domains
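    # Illustrative note (editor's addition): the urlparse + regex step above reduces a
    # stored website URL to a bare host before whitelisting it, e.g.
    #   'http://www.example.org/some/page' -> netloc 'www.example.org' -> 'example.org'
    # Social-network URLs are skipped so the crawl stays on the actors' and projects'
    # own sites.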
def get_starting_urls(self):
pattern = "http[s]{0,1}://[a-zA-Z0-9]*[\.]{0,1}[a-zA-Z0-9]+[\.][a-zA-Z0-9]{0,4}"
start_urls = []
self.db = MySQLdb.connect(host, username, password, database, charset='utf8')
self.cursor = self.db.cursor()
sql = "Select idActors,ActorName,ActorWebsite from Actors where DataSources_idDataSources>16"
self.cursor.execute(sql)
results = self.cursor.fetchall()
urls = []
for res in results:
idActor = res[0]
ArtName = res[1]
ArtWeb = res[2]
if ArtWeb== None or "vk.com" in ArtWeb.lower() or "youtube" in ArtWeb.lower() or "twitter" in ArtWeb.lower() or "linkedin" in ArtWeb.lower() \
or "vimeo" in ArtWeb.lower() or "instagram" in ArtWeb.lower() or "plus.google" in ArtWeb.lower() or "pinterest" in ArtWeb.lower() :
continue
prog = re.compile(pattern)
result = prog.match(ArtWeb.lower())
if result == None:
continue
start_urls.append(ArtWeb)
sql = "Select idProjects,ProjectName,ProjectWebpage from Projects where DataSources_idDataSources>16"
self.cursor.execute(sql)
results = self.cursor.fetchall()
urls = []
for res in results:
idActor = res[0]
ArtName = res[1]
ArtWeb = res[2]
if ArtWeb == None or "vk.com" in ArtWeb.lower() or "youtube" in ArtWeb.lower() or "twitter" in ArtWeb.lower() or "linkedin" in ArtWeb.lower() \
or "vimeo" in ArtWeb.lower() or "instagram" in ArtWeb.lower() or "plus.google" in ArtWeb.lower():
continue
prog = re.compile(pattern)
result = prog.match(ArtWeb.lower())
if result == None:
continue
start_urls.append(ArtWeb)
return start_urls
def parse_start_url(self, response):
source_page = response.url
print source_page
if "facebook.com" in source_page or "vk.com" in source_page or "youtube.com" in source_page or "twitter.com" in source_page or \
"linkedin" in source_page or "vimeo" in source_page or "instagram" in source_page or "google" in source_page or "github" in source_page\
or "pinterest" in source_page:
return
all_page_links = response.xpath('//a/@href').extract()
item = SIScrapedItem()
item.URL = source_page
ptitle = response.xpath('//title').extract()
if len(ptitle)>0:
item.PageTitle = strip_tags(response.xpath('//title').extract()[0])
else:
item.PageTitle = ""
item.Content = response.body
try:
s = fromstring(response.body)
cleaner = Cleaner()
cleaner.javascript = True # This is True because we want to activate the javascript filter
cleaner.style = True
s2 = cleaner.clean_html(s)
inner_html = tostring(s2)
item.Text = strip_tags(inner_html)
except:
inner_html = BeautifulSoup.BeautifulSoup(response.body).text
item.Text = strip_tags(inner_html)
parsed_uri = urlparse(item.URL)
domain = '{uri.netloc}/'.format(uri=parsed_uri).replace("/", "").replace("www.", "")
isActor = False
find_act_sql = "Select idActors,ActorName,ActorWebsite from Actors where ActorWebsite like '%" + domain + "%'"
self.cursor.execute(find_act_sql)
results = self.cursor.fetchall()
isActor = False
for res in results:
item.RelatedTo = "Actor"
item.DatabaseID = res[0]
item.Name = res[1]
isActor = True
print "This is Actor with domain "+domain
print item.Name
if isActor == False:
find_pro_sql = "Select idProjects,ProjectName,ProjectWebpage from Projects where ProjectWebpage like '%" + domain + "%'"
self.cursor.execute(find_pro_sql)
results = self.cursor.fetchall()
for res in results:
item.RelatedTo = "Project"
item.DatabaseID = res[0]
item.Name = res[1]
print "This is Project with domain " + domain
print item.Name
if(item.DatabaseID == None or item.DatabaseID==""):
return
client = MongoClient()
db = client.ESID
result = db.projects_actors2.insert_one(
{
"relatedTo": item.RelatedTo,
"mysql_databaseID": str(item.DatabaseID),
"name": item.Name,
"url": item.URL,
"page_title": item.PageTitle,
"content": item.Content,
"text": item.Text
}
)
# Convert each Relative page link to Absolute page link -> /abc.html -> www.domain.com/abc.html and then send Request object
# for relative_link in all_page_links:
#
# print "relative link procesed:" + relative_link
# parsed_uri = urlparse(source_page)
yield item | gpl-3.0 | -9,137,522,277,027,694,000 | 40.039648 | 160 | 0.575309 | false |
uros-sipetic/PyLOH | bin/seg2bed.py | 1 | 2149 | #!/usr/bin/env python
#=======================================================================================================================
# Created on 2013-09-28
# @author: Yi Li
#
# PyLOH
# BICseq2bed.py
#
# Copyright (c) 2013 Yi Li <[email protected]>
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of GNU GPL v2.0 (see the file LICENSE included with the distribution).
#
#=======================================================================================================================
#
# Modified on 2014-08-11
#
# @author: Yi Li
#
#=======================================================================================================================
import argparse
CHROM_LIST = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8',
'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15',
'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22',
'1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
'13', '14', '15', '16', '17', '18', '19', '20', '21', '22']
def main():
parser = argparse.ArgumentParser(description='Converting DNAcopy/BICseq segments file to BED file')
parser.add_argument('inseg', help='''Input segments file from DNAcopy/BICseq.''')
parser.add_argument('outBED', help='''Output bed file of segments.''')
parser.add_argument('--seg_length', default=1000000, type=int,
help='''Minimum length (bp) required for each segment. Default is 1e6.''')
args = parser.parse_args()
infile = open(args.inseg)
outfile = open(args.outBED, 'w')
for line in infile:
if line[0:5] == 'chrom':
continue
chrom, start, end = line.strip('\n').split('\t')[0:3]
seg_length = int(float(end)) - int(float(start)) + 1
if seg_length < args.seg_length or chrom not in CHROM_LIST:
continue
outfile.write('\t'.join([chrom, start, end]) + '\n')
infile.close()
outfile.close()
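# Illustrative example (editor's addition; the input layout is inferred from the code
# above): for a DNAcopy/BICseq segment line "chr1\t1000000\t3500000\t...", the segment
# length is 3500000 - 1000000 + 1 = 2500001 bp, which passes the default --seg_length
# of 1e6, and chr1 is in CHROM_LIST, so "chr1\t1000000\t3500000" is written to the
# output BED file; shorter segments and unknown chromosomes are dropped.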
if __name__ == '__main__':
main()
| gpl-2.0 | -4,049,469,077,399,976,400 | 34.229508 | 120 | 0.455561 | false |
dwillmer/blaze | blaze/compute/tests/test_postgresql_compute.py | 1 | 4193 | import itertools
from datetime import timedelta
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from odo import odo, resource, drop, discover
from blaze import symbol, compute, concat
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture
def url():
return 'postgresql://postgres@localhost/test::%s' % next(names)
@pytest.yield_fixture
def sql(url):
try:
t = resource(url, dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), ('b', 2)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_dts(url):
try:
t = resource(url, dshape='var * {A: datetime}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([(d,) for d in pd.date_range('2014-01-01', '2014-02-01')], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_two_tables():
dshape = 'var * {a: int32}'
try:
t = resource(url(), dshape=dshape)
u = resource(url(), dshape=dshape)
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield u, t
finally:
drop(t)
drop(u)
@pytest.yield_fixture
def sql_with_float(url):
try:
t = resource(url, dshape='var * {c: float64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
def test_postgres_create(sql):
assert odo(sql, list) == [('a', 1), ('b', 2)]
def test_postgres_isnan(sql_with_float):
data = (1.0,), (float('nan'),)
table = odo(data, sql_with_float)
sym = symbol('s', discover(data))
assert odo(compute(sym.isnan(), table), list) == [(False,), (True,)]
def test_insert_from_subselect(sql_with_float):
data = pd.DataFrame([{'c': 2.0}, {'c': 1.0}])
tbl = odo(data, sql_with_float)
s = symbol('s', discover(data))
odo(compute(s[s.c.isin((1.0, 2.0))].sort(), tbl), sql_with_float),
tm.assert_frame_equal(
odo(sql_with_float, pd.DataFrame).iloc[2:].reset_index(drop=True),
pd.DataFrame([{'c': 1.0}, {'c': 2.0}]),
)
def test_concat(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
t = symbol('t', discover(t_data))
u = symbol('u', discover(u_data))
tm.assert_frame_equal(
odo(
compute(concat(t, u).sort('a'), {t: t_table, u: u_table}),
pd.DataFrame,
),
pd.DataFrame(np.arange(10), columns=['a']),
)
def test_concat_invalid_axis(sql_two_tables):
t_table, u_table = sql_two_tables
t_data = pd.DataFrame(np.arange(5), columns=['a'])
u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
odo(t_data, t_table)
odo(u_data, u_table)
# We need to force the shape to not be a record here so we can
# create the `Concat` node with an axis=1.
t = symbol('t', '5 * 1 * int32')
u = symbol('u', '5 * 1 * int32')
with pytest.raises(ValueError) as e:
compute(concat(t, u, axis=1), {t: t_table, u: u_table})
# Preserve the suggestion to use merge.
assert "'merge'" in str(e.value)
def test_timedelta_arith(sql_with_dts):
delta = timedelta(days=1)
dates = pd.Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(dates))
assert (
odo(compute(sym + delta, sql_with_dts), pd.Series) == dates + delta
).all()
assert (
odo(compute(sym - delta, sql_with_dts), pd.Series) == dates - delta
).all()
def test_coerce_bool_and_sum(sql):
n = sql.name
t = symbol(n, discover(sql))
expr = (t.B > 1.0).coerce(to='int32').sum()
result = compute(expr, sql).scalar()
expected = odo(compute(t.B, sql), pd.Series).gt(1).sum()
assert result == expected
| bsd-3-clause | 2,335,371,108,966,097,000 | 25.371069 | 77 | 0.57429 | false |
pariahsoft/Dennis | commands/join.py | 1 | 2591 | ########################################
## Adventure Bot "Dennis" ##
## commands/join.py ##
## Copyright 2012-2013 PariahSoft LLC ##
########################################
## **********
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to
## deal in the Software without restriction, including without limitation the
## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
## sell copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
## IN THE SOFTWARE.
## **********
################
# Join Command #
################
import hashlib
from helpers import *
from database import get, put
from help import C_HELP
def C_JOIN(S, DB, sender, args):
if len(args) != 1:
C_HELP(S, DB, sender, ["join"])
return
rows = get(DB, "SELECT username FROM players WHERE username='{0}'".format(sender))
if not len(rows): # It's a new player.
put(DB, "INSERT INTO players (username, name, desc, room, pass) VALUES ('{0}', '{0}', ' ', '0', '{1}')".format(sender, hashlib.sha256(args[0]).hexdigest()))
send(S, sender, "Created new player \"{0}\". Your password is \"{1}\".".format(sender, args[0]))
passhash = get(DB, "SELECT pass FROM players WHERE username='{0}'".format(sender))
if passhash[0][0] == hashlib.sha256(args[0]).hexdigest(): # Authenticated successfully.
setonline(DB, sender, 1)
roomid = getroom(DB, sender)
enterroom(DB, roomid, sender) # Add player to their room.
playerinfo = playerstat(DB, sender)
send(S, sender, "Welcome, {0}.".format(playerinfo["name"])) # Greet player.
announce(S, DB, "{0} joined the game.".format(playerinfo["name"]))
announce_room(S, DB, roomid, "{0} entered the room.".format(playerinfo["name"]))
else: # Bad login.
send(S, sender, "Incorrect password for registered player.")
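# Illustrative note (editor's addition): C_JOIN expects exactly one argument, the
# password, so a player command along the lines of
#   join hunter2
# either creates a new player record on first use (storing the SHA-256 hash of the
# password) or authenticates an existing one, then marks the player online and
# announces the arrival to the game and to the player's current room.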
| mit | -1,338,686,518,646,774,300 | 40.790323 | 158 | 0.660363 | false |
czcorpus/kontext | lib/conclib/errors.py | 1 | 2348 | # Copyright(c) 2021 Charles University in Prague, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright(c) 2021 Tomas Machalek <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from typing import Optional
import re
class ConcordanceException(Exception):
pass
class ConcordanceQuerySyntaxError(ConcordanceException):
pass
class ConcordanceQueryParamsError(ConcordanceException):
pass
class EmptyParallelCorporaIntersection(ConcordanceException):
pass
class UnknownConcordanceAction(ConcordanceException):
pass
class ConcCalculationStatusException(ConcordanceException):
def __init__(self, msg, orig_error=None):
super(ConcCalculationStatusException, self).__init__('{0}: {1}'.format(msg, orig_error))
self._orig_error = orig_error
@property
def orig_error(self):
return self._orig_error
class ConcNotFoundException(ConcordanceException):
pass
class BrokenConcordanceException(ConcordanceException):
pass
def extract_manatee_syntax_error(err: Exception) -> Optional[ConcordanceQuerySyntaxError]:
"""
    Test and extract a Manatee syntax error. In case of a match,
    a normalized ConcordanceQuerySyntaxError is returned, otherwise None.
"""
msg = str(err)
if isinstance(err, RuntimeError) and ('syntax error' in msg or 'unexpected character' in msg):
srch = re.match(r'unexpected character(.*)at position (\d+)', msg)
if srch:
return ConcordanceQuerySyntaxError(
'Syntax error at position {}. Please check the query and its type.'.format(srch.group(2)))
else:
return ConcordanceQuerySyntaxError('Syntax error. Please check the query and its type')
return None
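# Illustrative usage sketch (editor's addition; run_manatee_query is a hypothetical
# caller, not a real kontext function):
#
#     try:
#         run_manatee_query(q)
#     except RuntimeError as ex:
#         norm_err = extract_manatee_syntax_error(ex)
#         if norm_err is not None:
#             raise norm_err
#         raise
#
# i.e. Manatee RuntimeErrors that look like query syntax problems are re-raised as the
# normalized ConcordanceQuerySyntaxError, while other errors propagate unchanged.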
| gpl-2.0 | -3,160,155,209,115,803,600 | 29.894737 | 106 | 0.72615 | false |
tensorflow/agents | tf_agents/utils/common.py | 1 | 50736 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for TF-Agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as cs
import contextlib
import distutils.version
import functools
import importlib
import os
from typing import Dict, Optional, Text
from absl import logging
import numpy as np
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import nest_utils
from tf_agents.utils import object_identity
# pylint:disable=g-direct-tensorflow-import
from tensorflow.core.protobuf import struct_pb2 # TF internal
from tensorflow.python import tf2 as tf2_checker # TF internal
from tensorflow.python.eager import monitoring # TF internal
from tensorflow.python.saved_model import nested_structure_coder # TF internal
# pylint:enable=g-direct-tensorflow-import
try:
importlib.import_module('tf_agents.utils.allow_tf1')
except ImportError:
_TF1_MODE_ALLOWED = False
else:
_TF1_MODE_ALLOWED = True
tf_agents_gauge = monitoring.BoolGauge('/tensorflow/agents/agents',
'TF-Agents usage', 'method')
MISSING_RESOURCE_VARIABLES_ERROR = """
Resource variables are not enabled. Please enable them by adding the following
code to your main() method:
tf.compat.v1.enable_resource_variables()
For unit tests, subclass `tf_agents.utils.test_utils.TestCase`.
"""
def check_tf1_allowed():
"""Raises an error if running in TF1 (non-eager) mode and this is disabled."""
if _TF1_MODE_ALLOWED:
return
if not tf2_checker.enabled():
raise RuntimeError(
'You are using TF1 or running TF with eager mode disabled. '
'TF-Agents no longer supports TF1 mode (except for a shrinking list of '
'internal allowed users). If this negatively affects you, please '
'reach out to the TF-Agents team. Otherwise please use TF2.')
def resource_variables_enabled():
return tf.compat.v1.resource_variables_enabled()
_IN_LEGACY_TF1 = (
tf.__git_version__ != 'unknown'
and tf.__version__ != '1.15.0'
and (distutils.version.LooseVersion(tf.__version__) <=
distutils.version.LooseVersion('1.15.0.dev20190821')))
def in_legacy_tf1():
return _IN_LEGACY_TF1
def set_default_tf_function_parameters(*args, **kwargs):
"""Generates a decorator that sets default parameters for `tf.function`.
Args:
*args: default arguments for the `tf.function`.
**kwargs: default keyword arguments for the `tf.function`.
Returns:
Function decorator with preconfigured defaults for `tf.function`.
"""
def maybe_wrap(fn):
"""Helper function."""
wrapped = [None]
@functools.wraps(fn)
def preconfigured_function(*fn_args, **fn_kwargs):
if tf.executing_eagerly():
return fn(*fn_args, **fn_kwargs)
if wrapped[0] is None:
wrapped[0] = function(*((fn,) + args), **kwargs)
return wrapped[0](*fn_args, **fn_kwargs) # pylint: disable=not-callable
return preconfigured_function
return maybe_wrap
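# Illustrative usage sketch (editor's addition):
#
#   @set_default_tf_function_parameters(
#       input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
#   def scale(x):
#     return x * 2.0
#
# When executing eagerly, scale() just calls the plain Python function; otherwise the
# first call wraps it via common.function with the given tf.function defaults and
# reuses that wrapper afterwards.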
def function(*args, **kwargs):
"""Wrapper for tf.function with TF Agents-specific customizations.
Example:
```python
@common.function()
def my_eager_code(x, y):
...
```
Args:
*args: Args for tf.function.
**kwargs: Keyword args for tf.function.
Returns:
A tf.function wrapper.
"""
autograph = kwargs.pop('autograph', False)
experimental_relax_shapes = kwargs.pop('experimental_relax_shapes', True)
return tf.function( # allow-tf-function
*args,
autograph=autograph,
experimental_relax_shapes=experimental_relax_shapes,
**kwargs)
def has_eager_been_enabled():
"""Returns true iff in TF2 or in TF1 with eager execution enabled."""
with tf.init_scope():
return tf.executing_eagerly()
def function_in_tf1(*args, **kwargs):
"""Wrapper that returns common.function if using TF1.
This allows for code that assumes autodeps is available to be written once,
in the same way, for both TF1 and TF2.
Usage:
```python
train = function_in_tf1()(agent.train)
loss = train(experience)
```
Args:
*args: Arguments for common.function.
**kwargs: Keyword arguments for common.function.
Returns:
A callable that wraps a function.
"""
def maybe_wrap(fn):
"""Helper function."""
# We're in TF1 mode and want to wrap in common.function to get autodeps.
wrapped = [None]
@functools.wraps(fn)
def with_check_resource_vars(*fn_args, **fn_kwargs):
"""Helper function for calling common.function."""
check_tf1_allowed()
if has_eager_been_enabled():
# We're either in eager mode or in tf.function mode (no in-between); so
# autodep-like behavior is already expected of fn.
return fn(*fn_args, **fn_kwargs)
if not resource_variables_enabled():
raise RuntimeError(MISSING_RESOURCE_VARIABLES_ERROR)
if wrapped[0] is None:
wrapped[0] = function(*((fn,) + args), **kwargs)
return wrapped[0](*fn_args, **fn_kwargs) # pylint: disable=not-callable
return with_check_resource_vars
return maybe_wrap
def create_variable(name,
initial_value=0,
shape=(),
dtype=tf.int64,
use_local_variable=False,
trainable=False,
initializer=None,
unique_name=True):
"""Create a variable."""
check_tf1_allowed()
if has_eager_been_enabled():
if initializer is None:
if shape:
initial_value = tf.constant(initial_value, shape=shape, dtype=dtype)
else:
initial_value = tf.convert_to_tensor(initial_value, dtype=dtype)
else:
if callable(initializer):
initial_value = lambda: initializer(shape, dtype)
else:
initial_value = initializer
return tf.compat.v2.Variable(
initial_value, trainable=trainable, dtype=dtype, name=name)
collections = [tf.compat.v1.GraphKeys.GLOBAL_VARIABLES]
if use_local_variable:
collections = [tf.compat.v1.GraphKeys.LOCAL_VARIABLES]
if initializer is None:
initializer = tf.compat.v1.initializers.constant(initial_value, dtype=dtype)
if shape is None:
shape = tf.convert_to_tensor(initial_value).shape
if unique_name:
name = tf.compat.v1.get_default_graph().unique_name(name)
return tf.compat.v1.get_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
collections=collections,
use_resource=True,
trainable=trainable)
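# Illustrative usage sketch (editor's addition): under TF2 / eager mode the helper
# above simply builds a tf.Variable, e.g.
#
#   step = create_variable('train_step', initial_value=0, dtype=tf.int64)
#   step.assign_add(1)
#
# while under TF1 graph mode it falls back to tf.compat.v1.get_variable with resource
# variables enabled (hence the check_tf1_allowed() call at the top of the function).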
def soft_variables_update(source_variables,
target_variables,
tau=1.0,
tau_non_trainable=None,
sort_variables_by_name=False):
"""Performs a soft/hard update of variables from the source to the target.
Note: **when using this function with TF DistributionStrategy**, the
`strategy.extended.update` call (below) needs to be done in a cross-replica
context, i.e. inside a merge_call. Please use the Periodically class above
that provides this wrapper for you.
For each variable v_t in target variables and its corresponding variable v_s
in source variables, a soft update is:
v_t = (1 - tau) * v_t + tau * v_s
When tau is 1.0 (the default), then it does a hard update:
v_t = v_s
Args:
source_variables: list of source variables.
target_variables: list of target variables.
tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard
update. This is used for trainable variables.
tau_non_trainable: A float scalar in [0, 1] for non_trainable variables. If
None, will copy from tau.
sort_variables_by_name: A bool, when True would sort the variables by name
before doing the update.
Returns:
An operation that updates target variables from source variables.
Raises:
ValueError: if `tau not in [0, 1]`.
ValueError: if `len(source_variables) != len(target_variables)`.
ValueError: "Method requires being in cross-replica context,
use get_replica_context().merge_call()" if used inside replica context.
"""
if tau < 0 or tau > 1:
raise ValueError('Input `tau` should be in [0, 1].')
if tau_non_trainable is None:
tau_non_trainable = tau
if tau_non_trainable < 0 or tau_non_trainable > 1:
raise ValueError('Input `tau_non_trainable` should be in [0, 1].')
updates = []
op_name = 'soft_variables_update'
if tau == 0.0 or not source_variables or not target_variables:
return tf.no_op(name=op_name)
if len(source_variables) != len(target_variables):
raise ValueError(
'Source and target variable lists have different lengths: '
'{} vs. {}'.format(len(source_variables), len(target_variables)))
if sort_variables_by_name:
source_variables = sorted(source_variables, key=lambda x: x.name)
target_variables = sorted(target_variables, key=lambda x: x.name)
strategy = tf.distribute.get_strategy()
for (v_s, v_t) in zip(source_variables, target_variables):
v_t.shape.assert_is_compatible_with(v_s.shape)
def update_fn(v1, v2):
"""Update variables."""
      # For non-trainable variables do hard updates.
      # This helps stabilize BatchNorm moving averages. TODO(b/144455039)
if not v1.trainable:
current_tau = tau_non_trainable
else:
current_tau = tau
if current_tau == 1.0:
return v1.assign(v2)
else:
return v1.assign((1 - current_tau) * v1 + current_tau * v2)
# TODO(b/142508640): remove this when b/142802462 is fixed.
# Workaround for b/142508640, only use extended.update for
# MirroredVariable variables (which are trainable variables).
# For other types of variables (i.e. SyncOnReadVariables, for example
# batch norm stats) do a regular assign, which will cause a sync and
# broadcast from replica 0, so will have slower performance but will be
# correct and not cause a failure.
if tf.distribute.has_strategy() and v_t.trainable:
# Assignment happens independently on each replica,
# see b/140690837 #46.
update = strategy.extended.update(v_t, update_fn, args=(v_s,))
else:
update = update_fn(v_t, v_s)
updates.append(update)
return tf.group(*updates, name=op_name)
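# Example (minimal sketch): a soft target-network update as used by DQN/DDPG
# style agents; `q_net` and `target_q_net` are hypothetical objects assumed to
# expose a `.variables` list of matching shapes.
#   update_op = soft_variables_update(
#       q_net.variables, target_q_net.variables, tau=0.005)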
def join_scope(parent_scope, child_scope):
"""Joins a parent and child scope using `/`, checking for empty/none.
Args:
parent_scope: (string) parent/prefix scope.
child_scope: (string) child/suffix scope.
Returns:
joined scope: (string) parent and child scopes joined by /.
"""
if not parent_scope:
return child_scope
if not child_scope:
return parent_scope
return '/'.join([parent_scope, child_scope])
# TODO(b/138322868): Add an optional action_spec for validation.
def index_with_actions(q_values, actions, multi_dim_actions=False):
"""Index into q_values using actions.
Note: this supports multiple outer dimensions (e.g. time, batch etc).
Args:
q_values: A float tensor of shape [outer_dim1, ... outer_dimK, action_dim1,
..., action_dimJ].
    actions: An int tensor of shape [outer_dim1, ... outer_dimK] if
      multi_dim_actions=False, or [outer_dim1, ... outer_dimK, J] if
      multi_dim_actions=True. I.e. in the multidimensional case,
actions[outer_dim1, ... outer_dimK] is a vector [actions_1, ...,
actions_J] where each element actions_j is an action in the range [0,
num_actions_j). While in the single dimensional case, actions[outer_dim1,
... outer_dimK] is a scalar.
multi_dim_actions: whether the actions are multidimensional.
Returns:
A [outer_dim1, ... outer_dimK] tensor of q_values for the given actions.
Raises:
ValueError: If actions have unknown rank.
"""
if actions.shape.rank is None:
raise ValueError('actions should have known rank.')
batch_dims = actions.shape.rank
if multi_dim_actions:
# In the multidimensional case, the last dimension of actions indexes the
# vector of actions for each batch, so exclude it from the batch dimensions.
batch_dims -= 1
outer_shape = tf.shape(input=actions)
batch_indices = tf.meshgrid(
*[tf.range(outer_shape[i]) for i in range(batch_dims)], indexing='ij')
batch_indices = [tf.cast(tf.expand_dims(batch_index, -1), dtype=tf.int32)
for batch_index in batch_indices]
if not multi_dim_actions:
actions = tf.expand_dims(actions, -1)
# Cast actions to tf.int32 in order to avoid a TypeError in tf.concat.
actions = tf.cast(actions, dtype=tf.int32)
action_indices = tf.concat(batch_indices + [actions], -1)
return tf.gather_nd(q_values, action_indices)
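# Example (minimal sketch): selecting the Q-value of the chosen action for each
# batch element; shapes follow the docstring above.
#   q_values = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # [B=2, A=3]
#   actions = tf.constant([2, 0])                                # [B=2]
#   chosen = index_with_actions(q_values, actions)               # -> [3.0, 4.0]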
def periodically(body, period, name='periodically'):
"""Periodically performs the tensorflow op in `body`.
The body tensorflow op will be executed every `period` times the periodically
op is executed. More specifically, with `n` the number of times the op has
  been executed, the body will be executed when `n` is a nonzero positive
  multiple of `period` (i.e. there exists an integer `k > 0` such that
`k * period == n`).
If `period` is `None`, it will not perform any op and will return a
`tf.no_op()`.
If `period` is 1, it will just execute the body, and not create any counters
or conditionals.
Args:
body: callable that returns the tensorflow op to be performed every time an
internal counter is divisible by the period. The op must have no output
(for example, a tf.group()).
period: inverse frequency with which to perform the op.
name: name of the variable_scope.
Raises:
TypeError: if body is not a callable.
Returns:
An op that periodically performs the specified op.
"""
if tf.executing_eagerly():
if isinstance(period, tf.Variable):
return Periodically(body, period, name)
return EagerPeriodically(body, period)
else:
return Periodically(body, period, name)()
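# Example (minimal sketch): run a hard target-network update every 100 train
# steps; `update_targets` is a hypothetical callable returning a tf.group().
#   periodic_update = periodically(update_targets, period=100)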
class Periodically(tf.Module):
"""Periodically performs the ops defined in `body`."""
def __init__(self, body, period, name='periodically'):
"""Periodically performs the ops defined in `body`.
The body tensorflow op will be executed every `period` times the
periodically op is executed. More specifically, with `n` the number of times
    the op has been executed, the body will be executed when `n` is a nonzero
    positive multiple of `period` (i.e. there exists an integer `k > 0` such that
`k * period == n`).
If `period` is `None`, it will not perform any op and will return a
`tf.no_op()`.
If `period` is 1, it will just execute the body, and not create any counters
or conditionals.
Args:
body: callable that returns the tensorflow op to be performed every time
an internal counter is divisible by the period. The op must have no
output (for example, a tf.group()).
period: inverse frequency with which to perform the op. It can be a Tensor
or a Variable.
name: name of the object.
Raises:
TypeError: if body is not a callable.
Returns:
An op that periodically performs the specified op.
"""
super(Periodically, self).__init__(name=name)
if not callable(body):
raise TypeError('body must be callable.')
self._body = body
self._period = period
self._counter = create_variable(self.name + '/counter', 0)
def __call__(self):
def call(strategy=None):
del strategy # unused
if self._period is None:
return tf.no_op()
if self._period == 1:
return self._body()
period = tf.cast(self._period, self._counter.dtype)
remainder = tf.math.mod(self._counter.assign_add(1), period)
return tf.cond(
pred=tf.equal(remainder, 0), true_fn=self._body, false_fn=tf.no_op)
# TODO(b/129083817) add an explicit unit test to ensure correct behavior
ctx = tf.distribute.get_replica_context()
if ctx:
return tf.distribute.get_replica_context().merge_call(call)
else:
return call()
class EagerPeriodically(object):
"""EagerPeriodically performs the ops defined in `body`.
Only works in Eager mode.
"""
def __init__(self, body, period):
"""EagerPeriodically performs the ops defined in `body`.
Args:
body: callable that returns the tensorflow op to be performed every time
an internal counter is divisible by the period. The op must have no
output (for example, a tf.group()).
period: inverse frequency with which to perform the op. Must be a simple
python int/long.
Raises:
TypeError: if body is not a callable.
Returns:
An op that periodically performs the specified op.
"""
if not callable(body):
raise TypeError('body must be callable.')
self._body = body
self._period = period
self._counter = 0
def __call__(self):
if self._period is None:
return tf.no_op()
if self._period == 1:
return self._body()
self._counter += 1
if self._counter % self._period == 0:
self._body()
def clip_to_spec(value, spec):
"""Clips value to a given bounded tensor spec.
Args:
value: (tensor) value to be clipped.
spec: (BoundedTensorSpec) spec containing min. and max. values for clipping.
Returns:
clipped_value: (tensor) `value` clipped to be compatible with `spec`.
"""
return tf.clip_by_value(value, spec.minimum, spec.maximum)
def spec_means_and_magnitudes(action_spec):
"""Get the center and magnitude of the ranges in action spec."""
action_means = tf.nest.map_structure(
lambda spec: (spec.maximum + spec.minimum) / 2.0, action_spec)
action_magnitudes = tf.nest.map_structure(
lambda spec: (spec.maximum - spec.minimum) / 2.0, action_spec)
return np.array(
action_means, dtype=np.float32), np.array(
action_magnitudes, dtype=np.float32)
def scale_to_spec(tensor, spec):
"""Shapes and scales a batch into the given spec bounds.
Args:
tensor: A [batch x n] tensor with values in the range of [-1, 1].
spec: (BoundedTensorSpec) to use for scaling the action.
Returns:
A batch scaled the given spec bounds.
"""
tensor = tf.reshape(tensor, [-1] + spec.shape.as_list())
# Scale the tensor.
means, magnitudes = spec_means_and_magnitudes(spec)
tensor = means + magnitudes * tensor
# Set type.
return tf.cast(tensor, spec.dtype)
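# Example (minimal sketch): mapping a tanh-squashed network output in [-1, 1]
# onto a hypothetical action spec bounded by [0, 10].
#   spec = tensor_spec.BoundedTensorSpec([1], tf.float32, minimum=0., maximum=10.)
#   scaled = scale_to_spec(tf.constant([[-1.0], [0.0], [1.0]]), spec)  # 0., 5., 10.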
def ornstein_uhlenbeck_process(initial_value,
damping=0.15,
stddev=0.2,
seed=None,
scope='ornstein_uhlenbeck_noise'):
"""An op for generating noise from a zero-mean Ornstein-Uhlenbeck process.
The Ornstein-Uhlenbeck process is a process that generates temporally
correlated noise via a random walk with damping. This process describes
the velocity of a particle undergoing brownian motion in the presence of
friction. This can be useful for exploration in continuous action environments
with momentum.
The temporal update equation is:
`x_next = (1 - damping) * x + N(0, std_dev)`
Args:
initial_value: Initial value of the process.
damping: The rate at which the noise trajectory is damped towards the mean.
We must have 0 <= damping <= 1, where a value of 0 gives an undamped
random walk and a value of 1 gives uncorrelated Gaussian noise. Hence in
most applications a small non-zero value is appropriate.
stddev: Standard deviation of the Gaussian component.
seed: Seed for random number generation.
scope: Scope of the variables.
Returns:
An op that generates noise.
"""
if tf.executing_eagerly():
return OUProcess(initial_value, damping, stddev, seed, scope)
else:
return OUProcess(initial_value, damping, stddev, seed, scope)()
class OUProcess(tf.Module):
"""A zero-mean Ornstein-Uhlenbeck process."""
def __init__(self,
initial_value,
damping=0.15,
stddev=0.2,
seed=None,
scope='ornstein_uhlenbeck_noise'):
"""A Class for generating noise from a zero-mean Ornstein-Uhlenbeck process.
The Ornstein-Uhlenbeck process is a process that generates temporally
correlated noise via a random walk with damping. This process describes
the velocity of a particle undergoing brownian motion in the presence of
friction. This can be useful for exploration in continuous action
environments with momentum.
The temporal update equation is:
`x_next = (1 - damping) * x + N(0, std_dev)`
Args:
initial_value: Initial value of the process.
damping: The rate at which the noise trajectory is damped towards the
mean. We must have 0 <= damping <= 1, where a value of 0 gives an
undamped random walk and a value of 1 gives uncorrelated Gaussian noise.
Hence in most applications a small non-zero value is appropriate.
stddev: Standard deviation of the Gaussian component.
seed: Seed for random number generation.
scope: Scope of the variables.
"""
super(OUProcess, self).__init__()
self._damping = damping
self._stddev = stddev
self._seed = seed
with tf.name_scope(scope):
self._x = tf.compat.v2.Variable(
initial_value=initial_value, trainable=False)
def __call__(self):
noise = tf.random.normal(
shape=self._x.shape,
stddev=self._stddev,
dtype=self._x.dtype,
seed=self._seed)
return self._x.assign((1. - self._damping) * self._x + noise)
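# Example (minimal sketch): temporally correlated exploration noise added to a
# continuous action; the 2-element shape and `action` tensor are hypothetical.
#   ou_noise = OUProcess(tf.zeros([2]), damping=0.15, stddev=0.2)
#   noisy_action = action + ou_noise()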
def log_probability(distributions, actions, action_spec):
"""Computes log probability of actions given distribution.
Args:
distributions: A possibly batched tuple of distributions.
actions: A possibly batched action tuple.
action_spec: A nested tuple representing the action spec.
Returns:
A Tensor representing the log probability of each action in the batch.
"""
outer_rank = nest_utils.get_outer_rank(actions, action_spec)
def _compute_log_prob(single_distribution, single_action):
# sum log-probs over everything but the batch
single_log_prob = single_distribution.log_prob(single_action)
rank = single_log_prob.shape.rank
reduce_dims = list(range(outer_rank, rank))
return tf.reduce_sum(
input_tensor=single_log_prob,
axis=reduce_dims)
nest_utils.assert_same_structure(distributions, actions)
log_probs = [
_compute_log_prob(dist, action)
for (dist, action
) in zip(tf.nest.flatten(distributions), tf.nest.flatten(actions))
]
# sum log-probs over action tuple
total_log_probs = tf.add_n(log_probs)
return total_log_probs
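# Example (minimal sketch): log-probability of sampled actions under a policy
# distribution; `dist` and `sampled_actions` are hypothetical and must match
# `action_spec` in nest structure.
#   log_pi = log_probability(dist, sampled_actions, action_spec)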
# TODO(ofirnachum): Move to distribution utils.
def entropy(distributions, action_spec):
"""Computes total entropy of distribution.
Args:
distributions: A possibly batched tuple of distributions.
action_spec: A nested tuple representing the action spec.
Returns:
A Tensor representing the entropy of each distribution in the batch.
Assumes actions are independent, so that marginal entropies of each action
may be summed.
"""
nested_modes = tf.nest.map_structure(lambda d: d.mode(), distributions)
outer_rank = nest_utils.get_outer_rank(nested_modes, action_spec)
def _compute_entropy(single_distribution):
entropies = single_distribution.entropy()
# Sum entropies over everything but the batch.
rank = entropies.shape.rank
reduce_dims = list(range(outer_rank, rank))
return tf.reduce_sum(input_tensor=entropies, axis=reduce_dims)
entropies = [
_compute_entropy(dist) for dist in tf.nest.flatten(distributions)
]
# Sum entropies over action tuple.
total_entropies = tf.add_n(entropies)
return total_entropies
def discounted_future_sum(values, gamma, num_steps):
"""Discounted future sum of batch-major values.
Args:
values: A Tensor of shape [batch_size, total_steps] and dtype float32.
gamma: A float discount value.
num_steps: A positive integer number of future steps to sum.
Returns:
A Tensor of shape [batch_size, total_steps], where each entry `(i, j)` is
the result of summing the entries of values starting from
`gamma^0 * values[i, j]` to
`gamma^(num_steps - 1) * values[i, j + num_steps - 1]`,
with zeros padded to values.
For example, values=[5, 6, 7], gamma=0.9, will result in sequence:
```python
[(5 * 0.9^0 + 6 * 0.9^1 + 7 * 0.9^2), (6 * 0.9^0 + 7 * 0.9^1), 7 * 0.9^0]
```
Raises:
ValueError: If values is not of rank 2.
"""
if values.get_shape().rank != 2:
raise ValueError('Input must be rank 2 tensor. Got %d.' %
values.get_shape().rank)
(batch_size, total_steps) = values.get_shape().as_list()
num_steps = tf.minimum(num_steps, total_steps)
discount_filter = tf.reshape(gamma**tf.cast(tf.range(num_steps), tf.float32),
[-1, 1, 1])
padded_values = tf.concat([values, tf.zeros([batch_size, num_steps - 1])], 1)
convolved_values = tf.squeeze(
tf.nn.conv1d(
input=tf.expand_dims(padded_values, -1),
filters=discount_filter,
stride=1,
padding='VALID'), -1)
return convolved_values
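# Worked example (minimal sketch), matching the docstring numbers above:
#   values = tf.constant([[5.0, 6.0, 7.0]])
#   discounted_future_sum(values, gamma=0.9, num_steps=3)
#   # -> [[5 + 0.9*6 + 0.81*7, 6 + 0.9*7, 7]] = [[16.07, 12.3, 7.]]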
def discounted_future_sum_masked(values, gamma, num_steps, episode_lengths):
"""Discounted future sum of batch-major values.
Args:
values: A Tensor of shape [batch_size, total_steps] and dtype float32.
gamma: A float discount value.
num_steps: A positive integer number of future steps to sum.
episode_lengths: A vector shape [batch_size] with num_steps per episode.
Returns:
A Tensor of shape [batch_size, total_steps], where each entry is the
discounted sum as in discounted_future_sum, except with values after
the end of episode_lengths masked to 0.
Raises:
ValueError: If values is not of rank 2, or if total_steps is not defined.
"""
if values.shape.rank != 2:
    raise ValueError('Input must be a rank 2 tensor. Got %d.' % values.shape.rank)
total_steps = tf.compat.dimension_value(values.shape[1])
if total_steps is None:
raise ValueError('total_steps dimension in input '
'values[batch_size, total_steps] must be fully defined.')
episode_mask = tf.cast(
tf.sequence_mask(episode_lengths, total_steps), tf.float32)
values *= episode_mask
return discounted_future_sum(values, gamma, num_steps)
def shift_values(values, gamma, num_steps, final_values=None):
"""Shifts batch-major values in time by some amount.
Args:
values: A Tensor of shape [batch_size, total_steps] and dtype float32.
gamma: A float discount value.
num_steps: A nonnegative integer amount to shift values by.
final_values: A float32 Tensor of shape [batch_size] corresponding to the
values at step num_steps + 1. Defaults to None (all zeros).
Returns:
A Tensor of shape [batch_size, total_steps], where each entry (i, j) is
gamma^num_steps * values[i, j + num_steps] if j + num_steps < total_steps;
gamma^(total_steps - j) * final_values[i] otherwise.
Raises:
ValueError: If values is not of rank 2.
"""
if values.get_shape().rank != 2:
raise ValueError('Input must be rank 2 tensor. Got %d.' %
values.get_shape().rank)
(batch_size, total_steps) = values.get_shape().as_list()
num_steps = tf.minimum(num_steps, total_steps)
if final_values is None:
final_values = tf.zeros([batch_size])
padding_exponent = tf.expand_dims(
tf.cast(tf.range(num_steps, 0, -1), tf.float32), 0)
final_pad = tf.expand_dims(final_values, 1) * gamma**padding_exponent
return tf.concat([
gamma**tf.cast(num_steps, tf.float32) * values[:, num_steps:], final_pad
], 1)
def get_episode_mask(time_steps):
"""Create a mask that is 0.0 for all final steps, 1.0 elsewhere.
Args:
time_steps: A TimeStep namedtuple representing a batch of steps.
Returns:
A float32 Tensor with 0s where step_type == LAST and 1s otherwise.
"""
episode_mask = tf.cast(
tf.not_equal(time_steps.step_type, ts.StepType.LAST), tf.float32)
return episode_mask
def get_contiguous_sub_episodes(next_time_steps_discount):
"""Computes mask on sub-episodes which includes only contiguous components.
Args:
next_time_steps_discount: Tensor of shape [batch_size, total_steps]
corresponding to environment discounts on next time steps (i.e.
next_time_steps.discount).
Returns:
A float Tensor of shape [batch_size, total_steps] specifying mask including
only contiguous components. Each row will be of the form
[1.0] * a + [0.0] * b, where a >= 1 and b >= 0, and in which the initial
sequence of ones corresponds to a contiguous sub-episode.
"""
episode_end = tf.equal(next_time_steps_discount,
tf.constant(0, dtype=next_time_steps_discount.dtype))
mask = tf.math.cumprod(
1.0 - tf.cast(episode_end, tf.float32), axis=1, exclusive=True)
return mask
def convert_q_logits_to_values(logits, support):
"""Converts a set of Q-value logits into Q-values using the provided support.
Args:
logits: A Tensor representing the Q-value logits.
support: The support of the underlying distribution.
Returns:
A Tensor containing the expected Q-values.
"""
probabilities = tf.nn.softmax(logits)
return tf.reduce_sum(input_tensor=support * probabilities, axis=-1)
def generate_tensor_summaries(tag, tensor, step):
"""Generates various summaries of `tensor` such as histogram, max, min, etc.
Args:
tag: A namescope tag for the summaries.
tensor: The tensor to generate summaries of.
step: Variable to use for summaries.
"""
with tf.name_scope(tag):
tf.compat.v2.summary.histogram(name='histogram', data=tensor, step=step)
tf.compat.v2.summary.scalar(
name='mean', data=tf.reduce_mean(input_tensor=tensor), step=step)
tf.compat.v2.summary.scalar(
name='mean_abs',
data=tf.reduce_mean(input_tensor=tf.abs(tensor)),
step=step)
tf.compat.v2.summary.scalar(
name='max', data=tf.reduce_max(input_tensor=tensor), step=step)
tf.compat.v2.summary.scalar(
name='min', data=tf.reduce_min(input_tensor=tensor), step=step)
def summarize_tensor_dict(tensor_dict: Dict[Text, types.Tensor],
step: Optional[types.Tensor]):
"""Generates summaries of all tensors in `tensor_dict`.
Args:
tensor_dict: A dictionary {name, tensor} to summarize.
step: The global step
"""
for tag in tensor_dict:
generate_tensor_summaries(tag, tensor_dict[tag], step)
def compute_returns(rewards: types.Tensor,
discounts: types.Tensor,
time_major: bool = False):
"""Compute the return from each index in an episode.
Args:
rewards: Tensor `[T]`, `[B, T]`, `[T, B]` of per-timestep reward.
discounts: Tensor `[T]`, `[B, T]`, `[T, B]` of per-timestep discount factor.
      Should be `0.` for the final step of each episode.
    time_major: Bool; for batched inputs, set to `True` if they are
      time-major (`[T, B]`), otherwise batch-major (`[B, T]`).
Returns:
Tensor of per-timestep cumulative returns.
"""
rewards.shape.assert_is_compatible_with(discounts.shape)
if (not rewards.shape.is_fully_defined() or
not discounts.shape.is_fully_defined()):
tf.debugging.assert_equal(tf.shape(input=rewards),
tf.shape(input=discounts))
def discounted_accumulate_rewards(next_step_return, reward_and_discount):
reward, discount = reward_and_discount
return next_step_return * discount + reward
# Support batched rewards and discount via transpose.
if rewards.shape.rank > 1 and not time_major:
rewards = tf.transpose(rewards, perm=[1, 0])
discounts = tf.transpose(discounts, perm=[1, 0])
# Cumulatively sum discounted reward R_t.
  # R_t = r_t + discount * (r_{t+1} + discount * (r_{t+2} + ...))
# As discount is 0 for terminal states, ends of episode will not include
# reward from subsequent timesteps.
returns = tf.scan(
discounted_accumulate_rewards, [rewards, discounts],
initializer=tf.zeros_like(rewards[0]),
reverse=True)
# Reverse transpose if needed.
if returns.shape.rank > 1 and not time_major:
returns = tf.transpose(returns, perm=[1, 0])
return returns
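# Worked example (minimal sketch): a single 3-step episode with discount 0.9
# and a terminal step (discount 0.) at the end.
#   rewards = tf.constant([1.0, 1.0, 1.0])
#   discounts = tf.constant([0.9, 0.9, 0.0])
#   compute_returns(rewards, discounts)
#   # -> [1 + 0.9*(1 + 0.9*1), 1 + 0.9*1, 1] = [2.71, 1.9, 1.]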
def initialize_uninitialized_variables(session, var_list=None):
"""Initialize any pending variables that are uninitialized."""
if var_list is None:
var_list = tf.compat.v1.global_variables() + tf.compat.v1.local_variables()
is_initialized = session.run(
[tf.compat.v1.is_variable_initialized(v) for v in var_list])
uninitialized_vars = []
for flag, v in zip(is_initialized, var_list):
if not flag:
uninitialized_vars.append(v)
if uninitialized_vars:
logging.info('uninitialized_vars: %s',
', '.join([str(x) for x in uninitialized_vars]))
session.run(tf.compat.v1.variables_initializer(uninitialized_vars))
class Checkpointer(object):
"""Checkpoints training state, policy state, and replay_buffer state."""
def __init__(self, ckpt_dir, max_to_keep=20, **kwargs):
"""A class for making checkpoints.
If ckpt_dir doesn't exists it creates it.
Args:
ckpt_dir: The directory to save checkpoints.
max_to_keep: Maximum number of checkpoints to keep (if greater than the
max are saved, the oldest checkpoints are deleted).
**kwargs: Items to include in the checkpoint.
"""
self._checkpoint = tf.train.Checkpoint(**kwargs)
if not tf.io.gfile.exists(ckpt_dir):
tf.io.gfile.makedirs(ckpt_dir)
self._manager = tf.train.CheckpointManager(
self._checkpoint, directory=ckpt_dir, max_to_keep=max_to_keep)
if self._manager.latest_checkpoint is not None:
logging.info('Checkpoint available: %s', self._manager.latest_checkpoint)
self._checkpoint_exists = True
else:
logging.info('No checkpoint available at %s', ckpt_dir)
self._checkpoint_exists = False
self._load_status = self._checkpoint.restore(
self._manager.latest_checkpoint)
@property
def checkpoint_exists(self):
return self._checkpoint_exists
@property
def manager(self):
"""Returns the underlying tf.train.CheckpointManager."""
return self._manager
def initialize_or_restore(self, session=None):
"""Initialize or restore graph (based on checkpoint if exists)."""
self._load_status.initialize_or_restore(session)
return self._load_status
def save(self, global_step: tf.Tensor,
options: tf.train.CheckpointOptions = None):
"""Save state to checkpoint."""
saved_checkpoint = self._manager.save(
checkpoint_number=global_step, options=options)
self._checkpoint_exists = True
logging.info('%s', 'Saved checkpoint: {}'.format(saved_checkpoint))
def replicate(tensor, outer_shape):
"""Replicates a tensor so as to match the given outer shape.
Example:
- t = [[1, 2, 3], [4, 5, 6]] (shape = [2, 3])
- outer_shape = [2, 1]
The shape of the resulting tensor is: [2, 1, 2, 3]
and its content is: [[t], [t]]
Args:
tensor: A tf.Tensor.
outer_shape: Outer shape given as a 1D tensor of type list, numpy or
tf.Tensor.
Returns:
The replicated tensor.
Raises:
ValueError: when the outer shape is incorrect.
"""
outer_shape = tf.convert_to_tensor(value=outer_shape)
if len(outer_shape.shape) != 1:
raise ValueError('The outer shape must be a 1D tensor')
outer_ndims = int(outer_shape.shape[0])
tensor_ndims = len(tensor.shape)
# No need to replicate anything if there is no outer dim to add.
if outer_ndims == 0:
return tensor
# Calculate target shape of replicated tensor
target_shape = tf.concat([outer_shape, tf.shape(input=tensor)], axis=0)
# tf.tile expects `tensor` to be at least 1D
if tensor_ndims == 0:
tensor = tensor[None]
# Replicate tensor "t" along the 1st dimension.
tiled_tensor = tf.tile(tensor, [tf.reduce_prod(input_tensor=outer_shape)] +
[1] * (tensor_ndims - 1))
# Reshape to match outer_shape.
return tf.reshape(tiled_tensor, target_shape)
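# Example (minimal sketch), matching the docstring above:
#   t = tf.constant([[1, 2, 3], [4, 5, 6]])   # shape [2, 3]
#   replicate(t, outer_shape=[2, 1])          # shape [2, 1, 2, 3]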
def assert_members_are_not_overridden(base_cls,
instance,
allowlist=(),
denylist=()):
"""Asserts public members of `base_cls` are not overridden in `instance`.
If both `allowlist` and `denylist` are empty, no public member of
`base_cls` can be overridden. If a `allowlist` is provided, only public
members in `allowlist` can be overridden. If a `denylist` is provided,
all public members except those in `denylist` can be overridden. Both
  `allowlist` and `denylist` cannot be provided at the same time; if both
  are given, a ValueError will be raised.
Args:
base_cls: A Base class.
instance: An instance of a subclass of `base_cls`.
allowlist: Optional list of `base_cls` members that can be overridden.
denylist: Optional list of `base_cls` members that cannot be overridden.
Raises:
ValueError if both allowlist and denylist are provided.
"""
if denylist and allowlist:
raise ValueError('Both `denylist` and `allowlist` cannot be provided.')
instance_type = type(instance)
subclass_members = set(instance_type.__dict__.keys())
public_members = set(
[m for m in base_cls.__dict__.keys() if not m.startswith('_')])
common_members = public_members & subclass_members
if allowlist:
common_members = common_members - set(allowlist)
elif denylist:
common_members = common_members & set(denylist)
overridden_members = [
m for m in common_members
if base_cls.__dict__[m] != instance_type.__dict__[m]
]
if overridden_members:
raise ValueError(
'Subclasses of {} cannot override most of its base members, but '
'{} overrides: {}'.format(base_cls, instance_type, overridden_members))
def element_wise_squared_loss(x, y):
return tf.compat.v1.losses.mean_squared_error(
x, y, reduction=tf.compat.v1.losses.Reduction.NONE)
def element_wise_huber_loss(x, y):
return tf.compat.v1.losses.huber_loss(
x, y, reduction=tf.compat.v1.losses.Reduction.NONE)
def transpose_batch_time(x):
"""Transposes the batch and time dimensions of a Tensor.
If the input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A Tensor.
Returns:
x transposed along the first two dimensions.
"""
x_static_shape = x.get_shape()
if x_static_shape.rank is not None and x_static_shape.rank < 2:
return x
x_rank = tf.rank(x)
x_t = tf.transpose(a=x, perm=tf.concat(([1, 0], tf.range(2, x_rank)), axis=0))
x_t.set_shape(
tf.TensorShape(
[x_static_shape.dims[1].value,
x_static_shape.dims[0].value]).concatenate(x_static_shape[2:]))
return x_t
def save_spec(spec, file_path):
"""Saves the given spec nest as a StructProto.
**Note**: Currently this will convert BoundedTensorSpecs into regular
TensorSpecs.
Args:
spec: A nested structure of TensorSpecs.
file_path: Path to save the encoded spec to.
"""
signature_encoder = nested_structure_coder.StructureCoder()
spec = tensor_spec.from_spec(spec)
spec_proto = signature_encoder.encode_structure(spec)
dir_path = os.path.dirname(file_path)
if not tf.io.gfile.exists(dir_path):
tf.io.gfile.makedirs(dir_path)
with tf.compat.v2.io.gfile.GFile(file_path, 'wb') as gfile:
gfile.write(spec_proto.SerializeToString())
def load_spec(file_path):
"""Loads a data spec from a file.
**Note**: Types for Named tuple classes will not match. Users need to convert
to these manually:
# Convert from:
# 'tensorflow.python.saved_model.nested_structure_coder.Trajectory'
# to proper TrajectorySpec.
# trajectory_spec = trajectory.Trajectory(*spec)
Args:
file_path: Path to the saved data spec.
Returns:
A nested structure of TensorSpecs.
"""
with tf.compat.v2.io.gfile.GFile(file_path, 'rb') as gfile:
signature_proto = struct_pb2.StructuredValue.FromString(gfile.read())
signature_encoder = nested_structure_coder.StructureCoder()
return signature_encoder.decode_proto(signature_proto)
def extract_shared_variables(variables_1, variables_2):
"""Separates shared variables from the given collections.
Args:
variables_1: An iterable of Variables
variables_2: An iterable of Variables
Returns:
A Tuple of ObjectIdentitySets described by the set operations
```
(variables_1 - variables_2,
variables_2 - variables_1,
variables_1 & variables_2)
```
"""
var_refs1 = object_identity.ObjectIdentitySet(variables_1)
var_refs2 = object_identity.ObjectIdentitySet(variables_2)
shared_vars = var_refs1.intersection(var_refs2)
return (var_refs1.difference(shared_vars), var_refs2.difference(shared_vars),
shared_vars)
def check_no_shared_variables(network_1, network_2):
"""Checks that there are no shared trainable variables in the two networks.
Args:
network_1: A network.Network.
network_2: A network.Network.
Raises:
ValueError: if there are any common trainable variables.
ValueError: if one of the networks has not yet been built
(e.g. user must call `create_variables`).
"""
variables_1 = object_identity.ObjectIdentitySet(network_1.trainable_variables)
variables_2 = object_identity.ObjectIdentitySet(network_2.trainable_variables)
shared_variables = variables_1 & variables_2
if shared_variables:
raise ValueError(
'After making a copy of network \'{}\' to create a target '
'network \'{}\', the target network shares weights with '
'the original network. This is not allowed. If '
        'you want to explicitly share weights with the target network, or '
'if your input network shares weights with others, please '
'provide a target network which explicitly, selectively, shares '
'layers/weights with the input network. If you are not intending to '
'share weights make sure all the weights are created inside the Network'
' since a copy will be created by creating a new Network with the same '
'args but a new name. Shared variables found: '
'\'{}\'.'.format(
network_1.name, network_2.name,
[x.name for x in shared_variables]))
def check_matching_networks(network_1, network_2):
"""Check that two networks have matching input specs and variables.
Args:
network_1: A network.Network.
network_2: A network.Network.
Raises:
ValueError: if the networks differ in input_spec, variables (number, dtype,
or shape).
ValueError: if either of the networks has not been built yet
(e.g. user must call `create_variables`).
"""
if network_1.input_tensor_spec != network_2.input_tensor_spec:
raise ValueError('Input tensor specs of network and target network '
'do not match: {} vs. {}.'.format(
network_1.input_tensor_spec,
network_2.input_tensor_spec))
if len(network_1.variables) != len(network_2.variables):
raise ValueError(
'Variables lengths do not match between Q network and target network: '
'{} vs. {}'.format(network_1.variables, network_2.variables))
for v1, v2 in zip(network_1.variables, network_2.variables):
if v1.dtype != v2.dtype or v1.shape != v2.shape:
raise ValueError(
'Variable dtypes or shapes do not match: {} vs. {}'.format(v1, v2))
def maybe_copy_target_network_with_checks(network, target_network=None,
name=None,
input_spec=None):
"""Copies the network into target if None and checks for shared variables."""
if target_network is None:
target_network = network.copy(name=name)
target_network.create_variables(input_spec)
# Copy may have been shallow, and variables may inadvertently be shared
# between the target and the original networks. This would be an unusual
# setup, so we throw an error to protect users from accidentally doing so.
# If you explicitly want this to be enabled, please open a feature request
# with the team.
check_no_shared_variables(network, target_network)
check_matching_networks(network, target_network)
return target_network
AggregatedLosses = cs.namedtuple(
'AggregatedLosses',
['total_loss', # Total loss = weighted + regularization
'weighted', # Weighted sum of per_example_loss by sample_weight.
'regularization', # Total of regularization losses.
])
def aggregate_losses(per_example_loss=None,
sample_weight=None,
global_batch_size=None,
regularization_loss=None):
"""Aggregates and scales per example loss and regularization losses.
If `global_batch_size` is given it would be used for scaling, otherwise it
would use the batch_dim of per_example_loss and number of replicas.
Args:
per_example_loss: Per-example loss [B] or [B, T, ...].
sample_weight: Optional weighting for each example, Tensor shaped [B] or
[B, T, ...], or a scalar float.
global_batch_size: Optional global batch size value. Defaults to (size of
first dimension of `losses`) * (number of replicas).
regularization_loss: Regularization loss.
Returns:
An AggregatedLosses named tuple with scalar losses to optimize.
"""
total_loss, weighted_loss, reg_loss = None, None, None
if sample_weight is not None and not isinstance(sample_weight, tf.Tensor):
sample_weight = tf.convert_to_tensor(sample_weight, dtype=tf.float32)
# Compute loss that is scaled by global batch size.
if per_example_loss is not None:
loss_rank = per_example_loss.shape.rank
if sample_weight is not None:
weight_rank = sample_weight.shape.rank
# Expand `sample_weight` to be broadcastable to the shape of
# `per_example_loss`, to ensure that multiplication works properly.
if weight_rank > 0 and loss_rank > weight_rank:
for dim in range(weight_rank, loss_rank):
sample_weight = tf.expand_dims(sample_weight, dim)
# Sometimes we have an episode boundary or similar, and at this location
# the loss is nonsensical (i.e., inf or nan); and sample_weight is zero.
# In this case, we should respect the zero sample_weight and ignore the
# frame.
per_example_loss = tf.math.multiply_no_nan(
per_example_loss, sample_weight)
if loss_rank is not None and loss_rank == 0:
err_msg = (
'Need to use a loss function that computes losses per sample, ex: '
'replace losses.mean_squared_error with tf.math.squared_difference. '
          'Invalid value passed for `per_example_loss`. Expected a tensor '
          'with at least rank 1, received: {}'.format(per_example_loss))
if tf.distribute.has_strategy():
raise ValueError(err_msg)
else:
logging.warning(err_msg)
# Add extra dimension to prevent error in compute_average_loss.
per_example_loss = tf.expand_dims(per_example_loss, 0)
elif loss_rank > 1:
# If per_example_loss is shaped [B, T, ...], we need to compute the mean
# across the extra dimensions, ex. time, as well.
per_example_loss = tf.reduce_mean(per_example_loss, range(1, loss_rank))
global_batch_size = global_batch_size and tf.cast(global_batch_size,
per_example_loss.dtype)
weighted_loss = tf.nn.compute_average_loss(
per_example_loss,
global_batch_size=global_batch_size)
total_loss = weighted_loss
# Add scaled regularization losses.
if regularization_loss is not None:
reg_loss = tf.nn.scale_regularization_loss(regularization_loss)
if total_loss is None:
total_loss = reg_loss
else:
total_loss += reg_loss
return AggregatedLosses(total_loss, weighted_loss, reg_loss)
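# Example (minimal sketch): aggregating a per-example TD loss with an optional
# importance-sampling weight; `td_loss` and `is_weights` are hypothetical
# [B]-shaped tensors.
#   agg = aggregate_losses(per_example_loss=td_loss, sample_weight=is_weights)
#   total = agg.total_loss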
def summarize_scalar_dict(name_data, step, name_scope='Losses/'):
if name_data:
with tf.name_scope(name_scope):
for name, data in name_data.items():
if data is not None:
tf.compat.v2.summary.scalar(
name=name, data=data, step=step)
@contextlib.contextmanager
def soft_device_placement():
"""Context manager for soft device placement, allowing summaries on CPU.
Eager and graph contexts have different default device placements. See
b/148408921 for details. This context manager should be used whenever using
summary writers contexts to make sure summaries work when executing on TPUs.
Yields:
Sets `tf.config.set_soft_device_placement(True)` within the context
"""
original_setting = tf.config.get_soft_device_placement()
try:
tf.config.set_soft_device_placement(True)
yield
finally:
tf.config.set_soft_device_placement(original_setting)
def deduped_network_variables(network, *args):
"""Returns a list of variables in net1 that are not in any other nets.
Args:
network: A Keras network.
*args: other networks to check for duplicate variables.
"""
other_vars = object_identity.ObjectIdentitySet(
[v for n in args for v in n.variables]) # pylint:disable=g-complex-comprehension
return [v for v in network.variables if v not in other_vars]
def safe_has_state(state):
"""Safely checks `state not in (None, (), [])`."""
# TODO(b/158804957): tf.function changes "s in ((),)" to a tensor bool expr.
# pylint: disable=literal-comparison
return state is not None and state is not () and state is not []
# pylint: enable=literal-comparison
| apache-2.0 | 7,468,900,581,231,471,000 | 34.356098 | 87 | 0.673802 | false |
leonardoo/django-pipeline | pipeline/templatetags/pipeline.py | 1 | 5342 | from __future__ import unicode_literals
import logging
from django.contrib.staticfiles.storage import staticfiles_storage
from django import template
from django.template.base import VariableDoesNotExist
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from ..collector import default_collector
from ..conf import settings
from ..packager import Packager, PackageNotFound
from ..utils import guess_type
logger = logging.getLogger(__name__)
register = template.Library()
class PipelineMixin(object):
request = None
_request_var = None
@property
def request_var(self):
if not self._request_var:
self._request_var = template.Variable('request')
return self._request_var
def package_for(self, package_name, package_type):
package = {
'js': getattr(settings, 'JAVASCRIPT', {}).get(package_name, {}),
'css': getattr(settings, 'STYLESHEETS', {}).get(package_name, {}),
}[package_type]
if package:
package = {package_name: package}
packager = {
'js': Packager(css_packages={}, js_packages=package),
'css': Packager(css_packages=package, js_packages={}),
}[package_type]
return packager.package_for(package_type, package_name)
def render(self, context):
try:
self.request = self.request_var.resolve(context)
except VariableDoesNotExist:
pass
def render_compressed(self, package, package_type):
if settings.PIPELINE_ENABLED:
method = getattr(self, "render_{0}".format(package_type))
return method(package, package.output_filename)
else:
if settings.PIPELINE_COLLECTOR_ENABLED:
default_collector.collect(self.request)
packager = Packager()
method = getattr(self, "render_individual_{0}".format(package_type))
paths = packager.compile(package.paths)
templates = packager.pack_templates(package)
return method(package, paths, templates=templates)
class StylesheetNode(PipelineMixin, template.Node):
def __init__(self, name):
self.name = name
def render(self, context):
super(StylesheetNode, self).render(context)
package_name = template.Variable(self.name).resolve(context)
try:
package = self.package_for(package_name, 'css')
except PackageNotFound:
logger.warn("Package %r is unknown. Check PIPELINE_CSS in your settings.", package_name)
return '' # fail silently, do not return anything if an invalid group is specified
return self.render_compressed(package, 'css')
def render_css(self, package, path):
template_name = package.template_name or "pipeline/css.html"
context = package.extra_context
context.update({
'type': guess_type(path, 'text/css'),
'url': mark_safe(staticfiles_storage.url(path))
})
return render_to_string(template_name, context)
def render_individual_css(self, package, paths, **kwargs):
tags = [self.render_css(package, path) for path in paths]
return '\n'.join(tags)
class JavascriptNode(PipelineMixin, template.Node):
def __init__(self, name):
self.name = name
def render(self, context):
super(JavascriptNode, self).render(context)
package_name = template.Variable(self.name).resolve(context)
try:
package = self.package_for(package_name, 'js')
except PackageNotFound:
logger.warn("Package %r is unknown. Check PIPELINE_JS in your settings.", package_name)
return '' # fail silently, do not return anything if an invalid group is specified
return self.render_compressed(package, 'js')
def render_js(self, package, path):
template_name = package.template_name or "pipeline/js.html"
context = package.extra_context
context.update({
'type': guess_type(path, 'text/javascript'),
'url': mark_safe(staticfiles_storage.url(path))
})
return render_to_string(template_name, context)
def render_inline(self, package, js):
context = package.extra_context
context.update({
'source': js
})
return render_to_string("pipeline/inline_js.html", context)
def render_individual_js(self, package, paths, templates=None):
tags = [self.render_js(package, js) for js in paths]
if templates:
tags.append(self.render_inline(package, templates))
return '\n'.join(tags)
@register.tag
def stylesheet(parser, token):
try:
tag_name, name = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('%r requires exactly one argument: the name of a group in the PIPELINE.STYLESHEETS setting' % token.split_contents()[0])
return StylesheetNode(name)
@register.tag
def javascript(parser, token):
try:
tag_name, name = token.split_contents()
except ValueError:
        raise template.TemplateSyntaxError('%r requires exactly one argument: the name of a group in the PIPELINE.JAVASCRIPT setting' % token.split_contents()[0])
return JavascriptNode(name)
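# Template usage (minimal sketch), assuming a package named 'stats' is defined
# in the PIPELINE stylesheet/javascript settings:
#   {% load pipeline %}
#   {% stylesheet 'stats' %}
#   {% javascript 'stats' %}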
| mit | 3,920,505,165,884,596,000 | 34.377483 | 163 | 0.647323 | false |
ncsu-osgeorel/grass-particle-flow-visualization | v.random.probability/v.random.probability.py | 1 | 3506 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################################################
#
# MODULE: v.random.probability
# AUTHOR(S): Anna Petrasova
# PURPOSE: Generates random vector points based on probability raster
# COPYRIGHT: (C) 2013 by the GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
#%module
#% description: Generates random vector points based on probability raster
#% keywords: vector
#% keywords: random
#%end
#%option
#% type: string
#% gisprompt: old,cell,raster
#% key: probability
#% label: Name of the input probability raster map
#% description: Probability of generating points
#% required : yes
#%end
#%option
#% type: string
#% gisprompt: new,vector,vector
#% key: output
#% description: Name for output vector map
#% required : yes
#%end
#%option
#% key: count
#% type: integer
#% description: Approximate number of particles
#% required : yes
#%end
import os
import atexit
from math import sqrt
from grass.script import core as gcore
from grass.script import raster as grast
TMP_RAST = []
TMP_VECT = []
def main():
options, flags = gcore.parser()
probability = options['probability']
output = options['output']
count = int(options['count'])
gcore.use_temp_region()
# probability map
probab_01 = 'probability_01_' + str(os.getpid())
TMP_RAST.append(probab_01)
info = grast.raster_info(probability)
gcore.write_command('r.recode', flags='d', input=probability, output=probab_01,
title="Recoded probability map to 0 to 1",
rules='-', stdin='{minim}:{maxim}:0:1'.format(minim=info['min'], maxim=info['max']))
mean = gcore.parse_key_val(gcore.read_command('r.univar', map=probab_01, flags='g'),
val_type=float)['mean']
resolution = count / (mean * (info['north'] - info['south'] + info['east'] - info['west']))
resolution = sqrt((mean * (info['north'] - info['south']) * (info['east'] - info['west'])) / count)
gcore.run_command('g.region', res=resolution)
random_name = 'random_' + str(os.getpid())
point_map = 'points_' + str(os.getpid())
point_grid = 'points_' + str(os.getpid())
TMP_RAST.append(random_name)
TMP_RAST.append(point_map)
TMP_VECT.append(point_grid)
gcore.run_command('r.surf.random', output=random_name, min=0, max=1)
grast.mapcalc(exp='{point_map} = if({rand} <= {prob}, 1, null())'.format(rand=random_name,
prob=probab_01,
point_map=point_map))
gcore.run_command('r.to.vect', flags='t', input=point_map, output=point_grid, type='point')
gcore.run_command('v.perturb', input=point_grid, output=output,
parameter=resolution / 2., seed=os.getpid())
def cleanup():
if len(TMP_RAST + TMP_VECT):
gcore.info(_("Cleaning %d temporary maps...") % len(TMP_RAST + TMP_VECT))
gcore.run_command('g.remove', rast=','.join(TMP_RAST), quiet=True)
gcore.run_command('g.remove', vect=','.join(TMP_VECT), quiet=True)
gcore.del_temp_region()
if __name__ == '__main__':
atexit.register(cleanup)
main()
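# Example invocation (sketch): generate roughly 1000 points weighted by a
# probability raster named 'prob' (hypothetical map name):
#   v.random.probability probability=prob output=random_points count=1000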
| gpl-2.0 | -3,447,419,904,983,814,000 | 32.711538 | 108 | 0.579578 | false |
bmoretz/Algorithms | Python/ch_02/Progression/__init__.py | 1 | 1237 | class Progression:
"""Iterator producing a generic progression
Default iterator produces the whole numbers 0, 1, 2, ...
"""
def __init__(self, start=0):
"""Initialize current to the first value of the progression."""
self._current = start
def _advance(self):
"""Update self._current to a new value.
        This should be overridden by a subclass to customize progression.
By convention, if current is set to None, this designates the
end of a finite progression.
"""
self._current += 1
def __next__(self):
"""Return the next element, or else raise StopIteration error."""
if self._current is None:
raise StopIteration()
else:
answer = self._current
self._advance()
return answer
def __iter__(self):
"""By convention, an iterator must return itself as an iterator."""
return self
def print_progression(self, n):
"""Print next n values of the progression."""
print(' '.join(str(next(self)) for j in range(n)))
def main():
p = Progression()
for n in range(0,10):
print('n: {0} value: {1}'.format(n, p.__next__()))
main() | gpl-3.0 | -8,686,870,622,804,007,000 | 26.511111 | 75 | 0.57882 | false |
hobarrera/khal | khal/khalendar/khalendar.py | 1 | 13821 | # Copyright (c) 2013-2017 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
CalendarCollection should enable modifying and querying a collection of
calendars. Each calendar is defined by the contents of a vdir, but uses an
SQLite db for caching (see backend if you're interested).
"""
import datetime
import os
import os.path
import itertools
from .vdir import CollectionNotFoundError, AlreadyExistingError, Vdir, \
get_etag_from_file
from . import backend
from .event import Event
from .. import log
from .exceptions import CouldNotCreateDbDir, UnsupportedFeatureError, \
ReadOnlyCalendarError, UpdateFailed, DuplicateUid
logger = log.logger
def create_directory(path):
if not os.path.isdir(path):
if os.path.exists(path):
raise RuntimeError('{0} is not a directory.'.format(path))
try:
os.makedirs(path, mode=0o750)
except OSError as error:
logger.fatal('failed to create {0}: {1}'.format(path, error))
raise CouldNotCreateDbDir()
class CalendarCollection(object):
"""CalendarCollection allows access to various calendars stored in vdirs
all calendars are cached in an sqlitedb for performance reasons"""
def __init__(self,
calendars=None,
hmethod='fg',
default_color='',
multiple='',
color='',
highlight_event_days=0,
locale=None,
dbpath=None,
):
assert dbpath is not None
assert calendars is not None
self._calendars = calendars
self._default_calendar_name = None
self._storages = dict()
for name, calendar in self._calendars.items():
ctype = calendar.get('ctype', 'calendar')
if ctype == 'calendar':
file_ext = '.ics'
elif ctype == 'birthdays':
file_ext = '.vcf'
else:
raise ValueError('ctype must be either `calendar` or `birthdays`')
try:
self._storages[name] = Vdir(calendar['path'], file_ext)
except CollectionNotFoundError:
os.makedirs(calendar['path'])
logger.info('created non-existing vdir {}'.format(calendar['path']))
self._storages[name] = Vdir(calendar['path'], file_ext)
self.hmethod = hmethod
self.default_color = default_color
self.multiple = multiple
self.color = color
self.highlight_event_days = highlight_event_days
self._locale = locale
self._backend = backend.SQLiteDb(
calendars=self.names, db_path=dbpath, locale=self._locale)
self._last_ctags = dict()
self.update_db()
@property
def writable_names(self):
return [c for c in self._calendars if not self._calendars[c].get('readonly', False)]
@property
def calendars(self):
return self._calendars.values()
@property
def names(self):
return self._calendars.keys()
@property
def default_calendar_name(self):
return self._default_calendar_name
@default_calendar_name.setter
def default_calendar_name(self, default):
if default is None:
self._default_calendar_name = default
elif default not in self.names:
raise ValueError('Unknown calendar: {0}'.format(default))
readonly = self._calendars[default].get('readonly', False)
if not readonly:
self._default_calendar_name = default
else:
raise ValueError(
'Calendar "{0}" is read-only and cannot be used as default'.format(default))
def _local_ctag(self, calendar):
return get_etag_from_file(self._calendars[calendar]['path'])
def _cover_event(self, event):
event.color = self._calendars[event.calendar]['color']
event.readonly = self._calendars[event.calendar]['readonly']
event.unicode_symbols = self._locale['unicode_symbols']
return event
def get_floating(self, start, end, minimal=False):
events = self._backend.get_floating(start, end, minimal)
return (self._cover_event(event) for event in events)
def get_localized(self, start, end, minimal=False):
events = self._backend.get_localized(start, end, minimal)
return (self._cover_event(event) for event in events)
def get_events_on(self, day, minimal=False):
"""return all events on `day`
:param day: datetime.date
:rtype: list()
"""
start = datetime.datetime.combine(day, datetime.time.min)
end = datetime.datetime.combine(day, datetime.time.max)
floating_events = self.get_floating(start, end, minimal)
localize = self._locale['local_timezone'].localize
localized_events = self.get_localized(localize(start), localize(end), minimal)
return itertools.chain(floating_events, localized_events)
def update(self, event):
"""update `event` in vdir and db"""
assert event.etag
if self._calendars[event.calendar]['readonly']:
raise ReadOnlyCalendarError()
with self._backend.at_once():
event.etag = self._storages[event.calendar].update(event.href, event, event.etag)
self._backend.update(event.raw, event.href, event.etag, calendar=event.calendar)
self._backend.set_ctag(self._local_ctag(event.calendar), calendar=event.calendar)
def force_update(self, event, collection=None):
"""update `event` even if an event with the same uid/href already exists"""
calendar = collection if collection is not None else event.calendar
if self._calendars[calendar]['readonly']:
raise ReadOnlyCalendarError()
with self._backend.at_once():
try:
href, etag = self._storages[calendar].upload(event)
except AlreadyExistingError as error:
href = error.existing_href
_, etag = self._storages[calendar].get(href)
etag = self._storages[calendar].update(href, event, etag)
self._backend.update(event.raw, href, etag, calendar=calendar)
self._backend.set_ctag(self._local_ctag(calendar), calendar=calendar)
def new(self, event, collection=None):
"""save a new event to the vdir and the database
param event: the event that should be updated, will get a new href and
etag properties
type event: event.Event
"""
calendar = collection if collection is not None else event.calendar
if hasattr(event, 'etag'):
assert not event.etag
if self._calendars[calendar]['readonly']:
raise ReadOnlyCalendarError()
with self._backend.at_once():
try:
event.href, event.etag = self._storages[calendar].upload(event)
except AlreadyExistingError as Error:
href = getattr(Error, 'existing_href', None)
raise DuplicateUid(href)
self._backend.update(event.raw, event.href, event.etag, calendar=calendar)
self._backend.set_ctag(self._local_ctag(calendar), calendar=calendar)
def delete(self, href, etag, calendar):
if self._calendars[calendar]['readonly']:
raise ReadOnlyCalendarError()
self._storages[calendar].delete(href, etag)
self._backend.delete(href, calendar=calendar)
def get_event(self, href, calendar):
return self._cover_event(self._backend.get(href, calendar=calendar))
def change_collection(self, event, new_collection):
href, etag, calendar = event.href, event.etag, event.calendar
event.etag = None
self.new(event, new_collection)
self.delete(href, etag, calendar=calendar)
def new_event(self, ical, collection):
"""creates and returns (but does not insert) new event from ical
string"""
calendar = collection or self.writable_names[0]
return Event.fromString(ical, locale=self._locale, calendar=calendar)
def update_db(self):
"""update the db from the vdir,
should be called after every change to the vdir
"""
for calendar in self._calendars:
if self._needs_update(calendar, remember=True):
self._db_update(calendar)
def needs_update(self):
"""Check if you need to call update_db.
This could either be the case because the vdirs were changed externally,
or another instance of khal updated the caching db already.
"""
# TODO is it a good idea to munch both use cases together?
# in case another instance of khal has updated the db, we only need
        # to get new events, but update_db() takes potentially a long time to return
# but then the code (in ikhal's refresh code) would need to look like
# this:
#
# update_ui = False
# if collection.needs_update():
# collection.update_db()
# update_ui = True
# if collection.needs_refresh() or update_ui:
# do_the_update()
#
# and the API would be made even uglier than it already is...
for calendar in self._calendars:
if self._needs_update(calendar) or \
self._last_ctags[calendar] != self._local_ctag(calendar):
return True
return False
def _needs_update(self, calendar, remember=False):
"""checks if the db for the given calendar needs an update"""
local_ctag = self._local_ctag(calendar)
if remember:
self._last_ctags[calendar] = local_ctag
return local_ctag != self._backend.get_ctag(calendar)
def _db_update(self, calendar):
"""implements the actual db update on a per calendar base"""
local_ctag = self._local_ctag(calendar)
db_hrefs = set(href for href, etag in self._backend.list(calendar))
storage_hrefs = set()
with self._backend.at_once():
for href, etag in self._storages[calendar].list():
storage_hrefs.add(href)
db_etag = self._backend.get_etag(href, calendar=calendar)
if etag != db_etag:
logger.debug('Updating {0} because {1} != {2}'.format(href, etag, db_etag))
self._update_vevent(href, calendar=calendar)
for href in db_hrefs - storage_hrefs:
self._backend.delete(href, calendar=calendar)
self._backend.set_ctag(local_ctag, calendar=calendar)
self._last_ctags[calendar] = local_ctag
def _update_vevent(self, href, calendar):
"""should only be called during db_update, only updates the db,
does not check for readonly"""
event, etag = self._storages[calendar].get(href)
try:
if self._calendars[calendar].get('ctype') == 'birthdays':
update = self._backend.update_birthday
else:
update = self._backend.update
update(event.raw, href=href, etag=etag, calendar=calendar)
return True
except Exception as e:
if not isinstance(e, (UpdateFailed, UnsupportedFeatureError)):
logger.exception('Unknown exception happened.')
logger.warning(
'Skipping {0}/{1}: {2}\n'
'This event will not be available in khal.'.format(calendar, href, str(e)))
return False
def search(self, search_string):
"""search for the db for events matching `search_string`"""
return (self._cover_event(event) for event in self._backend.search(search_string))
def get_day_styles(self, day, focus):
devents = list(self.get_events_on(day, minimal=True))
if len(devents) == 0:
return None
if self.color != '':
return 'highlight_days_color'
dcalendars = list(set(map(lambda event: event.calendar, devents)))
if len(dcalendars) == 1:
return 'calendar ' + dcalendars[0]
if self.multiple != '':
return 'highlight_days_multiple'
return ('calendar ' + dcalendars[0], 'calendar ' + dcalendars[1])
def get_styles(self, date, focus):
if focus:
if date == date.today():
return 'today focus'
else:
return 'reveal focus'
else:
if date == date.today():
return 'today'
else:
if self.highlight_event_days:
return self.get_day_styles(date, focus)
else:
return None
| mit | -4,693,823,169,885,647,000 | 39.177326 | 95 | 0.616019 | false |
bit-jmm/ttarm | result/plots/error_bar.py | 1 | 1372 | """
Demo of the errorbar function, including upper and lower limits
"""
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.5, 5.5, 0.5)
y = np.exp(-x)
xerr = 0.1
yerr = 0.2
ls = 'dotted'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# standard error bars
plt.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')
# including upper limits
uplims = np.zeros(x.shape)
uplims[[1, 5, 9]] = True
plt.errorbar(x, y+0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,
color='green')
# including lower limits
lolims = np.zeros(x.shape)
lolims[[2, 4, 8]] = True
plt.errorbar(x, y+1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,
color='red')
# including upper and lower limits
plt.errorbar(x, y+1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,
lolims=lolims, uplims=uplims, ls=ls, color='magenta')
# including xlower and xupper limits
xerr = 0.2
yerr = np.zeros(x.shape) + 0.2
yerr[[3, 6]] = 0.3
xlolims = lolims
xuplims = uplims
lolims = np.zeros(x.shape)
uplims = np.zeros(x.shape)
lolims[[6]] = True
uplims[[3]] = True
plt.errorbar(x, y+2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,
xlolims=xlolims, xuplims=xuplims, uplims=uplims, lolims=lolims,
ls='none', mec='blue', capsize=0, color='cyan')
ax.set_xlim((0, 5.5))
ax.set_title('Errorbar upper and lower limits')
plt.show()
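# Illustrative note (not part of the original demo): to write the figure to
# disk instead of opening a window, plt.show() could be replaced with
# something like fig.savefig('errorbar_limits.png', dpi=150); the filename
# and dpi here are made up for the example.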
| gpl-2.0 | -120,588,826,542,438,910 | 25.384615 | 76 | 0.648688 | false |
canbal/Tally | django_source/Tally/wsgi.py | 1 | 1581 | """
WSGI config for mysite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
path = os.path.join(os.path.abspath(os.path.dirname(__file__)))
if path not in sys.path:
sys.path.append(path)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "Tally.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Tally.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
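# As a minimal illustrative sketch only (not part of this project), a custom
# WSGI middleware could be wrapped around the Django application like this;
# the HealthCheckMiddleware name and the /healthz path are assumptions made
# up for the example.
#
# class HealthCheckMiddleware(object):
#     def __init__(self, app):
#         self.app = app
#
#     def __call__(self, environ, start_response):
#         # short-circuit a hypothetical health-check endpoint, otherwise
#         # delegate to the wrapped Django WSGI application
#         if environ.get("PATH_INFO") == "/healthz":
#             start_response("200 OK", [("Content-Type", "text/plain")])
#             return [b"ok"]
#         return self.app(environ, start_response)
#
# application = HealthCheckMiddleware(application)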
| mit | 6,453,998,761,826,364,000 | 40.72973 | 79 | 0.765971 | false |
Ernestyj/PyStudy | DataScience/python/mysql_query/td_job_time_used_query.py | 1 | 2104 | # -*- coding: utf-8 -*-
import os
import MySQLdb
import pandas as pd
import traceback
import logging
from mysql_query import ROOT_PATH
from tools.json_tools import JsonConf
class TDJobTimeUsedQuery():
def __init__(self, conf_path=os.path.join(ROOT_PATH, 'conf/conf.json')):
# init param
self.conf_path = conf_path
# init param placeholder
self.logger = None
self.conf = None
self.conf_mysql = None
self.con = None
self.df_time_used = None
def init(self):
self._init_logger()
self._get_conf()
self._init_connect_mysql()
def query(self):
sql = """
SELECT * FROM td_refresh_job_time_used WHERE job_start_time BETWEEN NOW() - INTERVAL 30 DAY AND NOW();
"""
self.df_time_used = pd.read_sql(sql=sql, con=self.con)
return self.df_time_used
# init helper##############################################################################
def _init_logger(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter('[%(asctime)s] - [%(name)s] - [%(levelname)s] : %(message)s'))
self.logger.addHandler(consoleHandler)
def _get_conf(self):
try:
self.conf = JsonConf().get_json_conf(path=self.conf_path)
self.conf_mysql = self.conf['mysql']
except Exception, e:
traceback.print_exc()
self.logger.error(e.message)
def _init_connect_mysql(self):
mysql_conf = self.conf_mysql
self.con = MySQLdb.connect(host=mysql_conf['host'], # your host, usually localhost
user=mysql_conf['username'], # your username
passwd=mysql_conf['password'], # your password
db=mysql_conf['defaultdb']) # name of the database
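# Illustrative usage sketch (not part of the original module): the intended
# call pattern appears to be
#   job_time_used_query_instance.init()        # read conf.json and connect
#   df = job_time_used_query_instance.query()  # DataFrame of the last 30 days
# using the module-level singleton defined below.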
# singleton
job_time_used_query_instance = TDJobTimeUsedQuery()
| apache-2.0 | 8,852,176,755,778,378,000 | 34.083333 | 116 | 0.569867 | false |
appleseedhq/cortex | test/IECoreScene/OBJReaderTest.py | 2 | 3644 | ##########################################################################
#
# Copyright (c) 2007-2009, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import sys
import IECore
import IECoreScene
class TestOBJReader( unittest.TestCase ) :
def testRead( self ) :
self.testfile = 'test/IECore/data/obj/triangle.obj'
r = IECore.Reader.create(self.testfile)
self.assertEqual(type(r), IECoreScene.OBJReader)
mesh = r.read()
self.failUnless( mesh.isInstanceOf( IECoreScene.MeshPrimitive.staticTypeId() ) )
self.failUnless( mesh.arePrimitiveVariablesValid() )
self.assertEqual( len( mesh ), 1 )
self.failUnless( "P" in mesh )
def testReadNormals( self ) :
self.testfile = 'test/IECore/data/obj/triangle_normals.obj'
r = IECore.Reader.create(self.testfile)
self.assertEqual(type(r), IECoreScene.OBJReader)
mesh = r.read()
self.failUnless( mesh.isInstanceOf( IECoreScene.MeshPrimitive.staticTypeId() ) )
self.failUnless( mesh.arePrimitiveVariablesValid() )
self.assertEqual( len( mesh ), 4 )
self.failUnless( "P" in mesh )
self.failUnless( "N" in mesh )
self.failUnless( "s" in mesh )
self.failUnless( "t" in mesh )
def testReadNoTexture( self ) :
self.testfile = 'test/IECore/data/obj/triangle_no_texture.obj'
r = IECore.Reader.create(self.testfile)
self.assertEqual(type(r), IECoreScene.OBJReader)
mesh = r.read()
self.failUnless( mesh.isInstanceOf( IECoreScene.MeshPrimitive.staticTypeId() ) )
self.failUnless( mesh.arePrimitiveVariablesValid() )
self.assertEqual( len( mesh ), 2 )
self.failUnless( "P" in mesh )
self.failUnless( "N" in mesh )
def testGroups( self ) :
self.testfile = 'test/IECore/data/obj/groups.obj'
r = IECore.Reader.create(self.testfile)
self.assertEqual(type(r), IECoreScene.OBJReader)
mesh = r.read()
self.failUnless( mesh.isInstanceOf( IECoreScene.MeshPrimitive.staticTypeId() ) )
self.failUnless( mesh.arePrimitiveVariablesValid() )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 6,596,177,313,644,003,000 | 34.72549 | 82 | 0.706641 | false |
savithruml/HOT-OpenStack | python-automate/create_VN.py | 1 | 1146 | #!/usr/bin/env python
#AUTHOR: SAVITHRU LOKANATH
#CONTACT: SAVITHRU AT JUNIPER.NET
import sys
from vnc_api import vnc_api
def create_VirtualNetwork(network_name, network_subnet, network_mask, vnc, project):
""" FUNCTION TO CREATE VIRTUAL-NETWORK """
vn_obj = vnc_api.VirtualNetwork(name=network_name, parent_obj=project)
vn_obj.add_network_ipam(vnc_api.NetworkIpam(),
vnc_api.VnSubnetsType([vnc_api.IpamSubnetType(subnet = vnc_api.SubnetType(network_subnet,network_mask))]))
vnc.virtual_network_create(vn_obj)
print 'Network "{}" created successfully\n'.format(network_name)
def main():
""" MAIN/AUTHENTICATE """
vnc = vnc_api.VncApi(username='admin', password='', api_server_host = '192.168.1.1', tenant_name='admin')
project = vnc.project_read(fq_name = ['default-domain', 'admin'])
left_network_name = 'left_VN'
left_network_subnet = '192.168.200.0'
left_network_mask = 24
create_VirtualNetwork(left_network_name, left_network_subnet, left_network_mask, vnc, project)
if __name__=="__main__":
main()
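# Illustrative sketch only: creating a second network (e.g. a hypothetical
# "right_VN") would just be another create_VirtualNetwork() call inside
# main() with a different name and subnet; the values below are made up.
#
# right_network_name = 'right_VN'
# right_network_subnet = '192.168.201.0'
# right_network_mask = 24
# create_VirtualNetwork(right_network_name, right_network_subnet,
#                       right_network_mask, vnc, project)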
| apache-2.0 | 7,363,044,350,327,122,000 | 29.972973 | 130 | 0.648342 | false |
Accelerite/cinder | cinder/tests/test_storwize_svc.py | 1 | 160933 | # Copyright 2013 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Tests for the IBM Storwize family and SVC volume driver.
"""
import random
import re
import time
import mock
from oslo_concurrency import processutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests import utils as testutils
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import storwize_svc
from cinder.volume.drivers.ibm.storwize_svc import helpers
from cinder.volume.drivers.ibm.storwize_svc import ssh
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class StorwizeSVCManagementSimulator:
def __init__(self, pool_name):
self._flags = {'storwize_svc_volpool_name': pool_name}
self._volumes_list = {}
self._hosts_list = {}
self._mappings_list = {}
self._fcmappings_list = {}
self._fcconsistgrp_list = {}
self._other_pools = {'openstack2': {}, 'openstack3': {}}
self._next_cmd_error = {
'lsportip': '',
'lsfabric': '',
'lsiscsiauth': '',
'lsnodecanister': '',
'mkvdisk': '',
'lsvdisk': '',
'lsfcmap': '',
'prestartfcmap': '',
'startfcmap': '',
'rmfcmap': '',
'lslicense': '',
}
self._errors = {
'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'),
'CMMVC6035E': ('', 'CMMVC6035E The action failed as the '
'object already exists.'),
'CMMVC5753E': ('', 'CMMVC5753E The specified object does not '
'exist or is not a suitable candidate.'),
'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'),
'CMMVC6581E': ('', 'CMMVC6581E The command has failed because '
'the maximum number of allowed iSCSI '
'qualified names (IQNs) has been reached, '
'or the IQN is already assigned or is not '
'valid.'),
'CMMVC5754E': ('', 'CMMVC5754E The specified object does not '
'exist, or the name supplied does not meet '
'the naming rules.'),
'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was '
'not created because the VDisk is already '
'mapped to a host.'),
'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was '
'not created because a VDisk is already '
'mapped to this host with this SCSI LUN.'),
'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was '
'not deleted because it is mapped to a '
'host or because it is part of a FlashCopy '
'or Remote Copy mapping, or is involved in '
'an image mode migrate.'),
'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered '
'is not valid. The name can contain letters, '
'numbers, spaces, periods, dashes, and '
'underscores. The name must begin with a '
'letter or an underscore. The name must not '
'begin or end with a space.'),
'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or '
'more of the configured port names is in a '
'mapping.'),
'CMMVC5924E': ('', 'CMMVC5924E The FlashCopy mapping was not '
'created because the source and target '
'virtual disks (VDisks) are different sizes.'),
'CMMVC6303E': ('', 'CMMVC6303E The create failed because the '
'source and target VDisks are the same.'),
'CMMVC7050E': ('', 'CMMVC7050E The command failed because at '
'least one node in the I/O group does not '
'support compressed VDisks.'),
'CMMVC6430E': ('', 'CMMVC6430E The command failed because the '
'target and source managed disk groups must '
'be different.'),
'CMMVC6353E': ('', 'CMMVC6353E The command failed because the '
'copy specified does not exist.'),
'CMMVC6446E': ('', 'The command failed because the managed disk '
'groups have different extent sizes.'),
# Catch-all for invalid state transitions:
'CMMVC5903E': ('', 'CMMVC5903E The FlashCopy mapping was not '
'changed because the mapping or consistency '
'group is another state.'),
'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported '
'parameter.'),
}
self._fc_transitions = {'begin': {'make': 'idle_or_copied'},
'idle_or_copied': {'prepare': 'preparing',
'delete': 'end',
'delete_force': 'end'},
'preparing': {'flush_failed': 'stopped',
'wait': 'prepared'},
'end': None,
'stopped': {'prepare': 'preparing',
'delete_force': 'end'},
'prepared': {'stop': 'stopped',
'start': 'copying'},
'copying': {'wait': 'idle_or_copied',
'stop': 'stopping'},
# Assume the worst case where stopping->stopped
# rather than stopping->idle_or_copied
'stopping': {'wait': 'stopped'},
}
self._fc_cg_transitions = {'begin': {'make': 'empty'},
'empty': {'add': 'idle_or_copied'},
'idle_or_copied': {'prepare': 'preparing',
'delete': 'end',
'delete_force': 'end'},
'preparing': {'flush_failed': 'stopped',
'wait': 'prepared'},
'end': None,
'stopped': {'prepare': 'preparing',
'delete_force': 'end'},
'prepared': {'stop': 'stopped',
'start': 'copying',
'delete_force': 'end',
'delete': 'end'},
'copying': {'wait': 'idle_or_copied',
'stop': 'stopping',
'delete_force': 'end',
'delete': 'end'},
# Assume the case where stopping->stopped
# rather than stopping->idle_or_copied
'stopping': {'wait': 'stopped'},
}
def _state_transition(self, function, fcmap):
if (function == 'wait' and
'wait' not in self._fc_transitions[fcmap['status']]):
return ('', '')
if fcmap['status'] == 'copying' and function == 'wait':
if fcmap['copyrate'] != '0':
if fcmap['progress'] == '0':
fcmap['progress'] = '50'
else:
fcmap['progress'] = '100'
fcmap['status'] = 'idle_or_copied'
return ('', '')
else:
try:
curr_state = fcmap['status']
fcmap['status'] = self._fc_transitions[curr_state][function]
return ('', '')
except Exception:
return self._errors['CMMVC5903E']
def _fc_cg_state_transition(self, function, fc_consistgrp):
if (function == 'wait' and
'wait' not in self._fc_transitions[fc_consistgrp['status']]):
return ('', '')
try:
curr_state = fc_consistgrp['status']
fc_consistgrp['status'] \
= self._fc_cg_transitions[curr_state][function]
return ('', '')
except Exception:
return self._errors['CMMVC5903E']
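# To illustrate the tables above: a FlashCopy mapping is created in the
# 'idle_or_copied' state and moves through 'prepare' -> 'preparing',
# 'wait' -> 'prepared', 'start' -> 'copying' and, after enough 'wait' polls
# have driven the progress to 100, back to 'idle_or_copied'. 'wait' is a
# no-op when it is not listed for the current state; any other unlisted
# transition produces the CMMVC5903E error.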
# Find an unused ID
@staticmethod
def _find_unused_id(d):
ids = []
for v in d.itervalues():
ids.append(int(v['id']))
ids.sort()
for index, n in enumerate(ids):
if n > index:
return str(index)
return str(len(ids))
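# For example (illustrative only): if the dict already holds objects with
# ids '0' and '2', the gap is reused and _find_unused_id() returns '1';
# with ids '0' and '1' there is no gap, so it returns '2'.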
# Check if name is valid
@staticmethod
def _is_invalid_name(name):
if re.match(r'^[a-zA-Z_][\w ._-]*$', name):
return False
return True
# Convert argument string to dictionary
@staticmethod
def _cmd_to_dict(arg_list):
no_param_args = [
'autodelete',
'bytes',
'compressed',
'force',
'nohdr',
]
one_param_args = [
'chapsecret',
'cleanrate',
'copy',
'copyrate',
'delim',
'easytier',
'filtervalue',
'grainsize',
'hbawwpn',
'host',
'iogrp',
'iscsiname',
'mdiskgrp',
'name',
'rsize',
'scsi',
'size',
'source',
'target',
'unit',
'vdisk',
'warning',
'wwpn',
'primary',
'consistgrp'
]
no_or_one_param_args = [
'autoexpand',
]
# Handle the special case of lsnode which is a two-word command
# Use the one word version of the command internally
if arg_list[0] in ('svcinfo', 'svctask'):
if arg_list[1] == 'lsnode':
if len(arg_list) > 4: # e.g. svcinfo lsnode -delim ! <node id>
ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]}
else:
ret = {'cmd': 'lsnodecanister'}
else:
ret = {'cmd': arg_list[1]}
arg_list.pop(0)
else:
ret = {'cmd': arg_list[0]}
skip = False
for i in range(1, len(arg_list)):
if skip:
skip = False
continue
if arg_list[i][0] == '-':
if arg_list[i][1:] in no_param_args:
ret[arg_list[i][1:]] = True
elif arg_list[i][1:] in one_param_args:
ret[arg_list[i][1:]] = arg_list[i + 1]
skip = True
elif arg_list[i][1:] in no_or_one_param_args:
if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-':
ret[arg_list[i][1:]] = True
else:
ret[arg_list[i][1:]] = arg_list[i + 1]
skip = True
else:
raise exception.InvalidInput(
reason=_('unrecognized argument %s') % arg_list[i])
else:
ret['obj'] = arg_list[i]
return ret
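# For example (illustrative only), a command line such as
# ['svctask', 'mkvdisk', '-name', 'vol1', '-size', '10', '-unit', 'gb']
# is parsed into
# {'cmd': 'mkvdisk', 'name': 'vol1', 'size': '10', 'unit': 'gb'},
# while a bare trailing argument (e.g. a volume name) is stored under 'obj'.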
@staticmethod
def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs):
"""Generic function for printing information."""
if nohdr:
del rows[0]
for index in range(len(rows)):
rows[index] = delim.join(rows[index])
return ('%s' % '\n'.join(rows), '')
@staticmethod
def _print_info_obj_cmd(header, row, delim=' ', nohdr=False):
"""Generic function for printing information for a specific object."""
objrows = []
for idx, val in enumerate(header):
objrows.append([val, row[idx]])
if nohdr:
for index in range(len(objrows)):
objrows[index] = ' '.join(objrows[index][1:])
for index in range(len(objrows)):
objrows[index] = delim.join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
@staticmethod
def _convert_bytes_units(bytestr):
num = int(bytestr)
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
while num > 1024:
num = num / 1024
unit_index += 1
return '%d%s' % (num, unit_array[unit_index])
@staticmethod
def _convert_units_bytes(num, unit):
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
while unit.lower() != unit_array[unit_index].lower():
num = num * 1024
unit_index += 1
return str(num)
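# For example (illustrative only): _convert_units_bytes(10, 'gb') returns
# '10737418240', and _convert_bytes_units('10737418240') turns that back
# into '10GB'.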
def _cmd_lslicense(self, **kwargs):
rows = [None] * 3
rows[0] = ['used_compression_capacity', '0.08']
rows[1] = ['license_compression_capacity', '0']
if self._next_cmd_error['lslicense'] == 'no_compression':
self._next_cmd_error['lslicense'] = ''
rows[2] = ['license_compression_enclosures', '0']
else:
rows[2] = ['license_compression_enclosures', '1']
return self._print_info_cmd(rows=rows, **kwargs)
# Print mostly made-up stuff in the correct syntax
def _cmd_lssystem(self, **kwargs):
rows = [None] * 3
rows[0] = ['id', '0123456789ABCDEF']
rows[1] = ['name', 'storwize-svc-sim']
rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)']
return self._print_info_cmd(rows=rows, **kwargs)
# Print mostly made-up stuff in the correct syntax, assume -bytes passed
def _cmd_lsmdiskgrp(self, **kwargs):
rows = [None] * 4
rows[0] = ['id', 'name', 'status', 'mdisk_count',
'vdisk_count', 'capacity', 'extent_size',
'free_capacity', 'virtual_capacity', 'used_capacity',
'real_capacity', 'overallocation', 'warning',
'easy_tier', 'easy_tier_status']
rows[1] = ['1', self._flags['storwize_svc_volpool_name'], 'online',
'1', str(len(self._volumes_list)), '3573412790272',
'256', '3529926246400', '1693247906775', '277841182',
'38203734097', '47', '80', 'auto', 'inactive']
rows[2] = ['2', 'openstack2', 'online',
'1', '0', '3573412790272', '256',
'3529432325160', '1693247906775', '277841182',
'38203734097', '47', '80', 'auto', 'inactive']
rows[3] = ['3', 'openstack3', 'online',
'1', '0', '3573412790272', '128',
'3529432325160', '1693247906775', '277841182',
'38203734097', '47', '80', 'auto', 'inactive']
if 'obj' not in kwargs:
return self._print_info_cmd(rows=rows, **kwargs)
else:
if kwargs['obj'] == self._flags['storwize_svc_volpool_name']:
row = rows[1]
elif kwargs['obj'] == 'openstack2':
row = rows[2]
elif kwargs['obj'] == 'openstack3':
row = rows[3]
else:
return self._errors['CMMVC5754E']
objrows = []
for idx, val in enumerate(rows[0]):
objrows.append([val, row[idx]])
if 'nohdr' in kwargs:
for index in range(len(objrows)):
objrows[index] = ' '.join(objrows[index][1:])
if 'delim' in kwargs:
for index in range(len(objrows)):
objrows[index] = kwargs['delim'].join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
# Print mostly made-up stuff in the correct syntax
def _cmd_lsnodecanister(self, **kwargs):
rows = [None] * 3
rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status',
'IO_group_id', 'IO_group_name', 'config_node',
'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias',
'panel_name', 'enclosure_id', 'canister_id',
'enclosure_serial_number']
rows[1] = ['1', 'node1', '', '123456789ABCDEF0', 'online', '0',
'io_grp0',
'yes', '123456789ABCDEF0', '100',
'iqn.1982-01.com.ibm:1234.sim.node1', '', '01-1', '1', '1',
'0123ABC']
rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '0',
'io_grp0',
'no', '123456789ABCDEF1', '100',
'iqn.1982-01.com.ibm:1234.sim.node2', '', '01-2', '1', '2',
'0123ABC']
if self._next_cmd_error['lsnodecanister'] == 'header_mismatch':
rows[0].pop(2)
self._next_cmd_error['lsnodecanister'] = ''
if self._next_cmd_error['lsnodecanister'] == 'remove_field':
for row in rows:
row.pop(0)
self._next_cmd_error['lsnodecanister'] = ''
return self._print_info_cmd(rows=rows, **kwargs)
# Print information of every single node of SVC
def _cmd_lsnode(self, **kwargs):
node_infos = dict()
node_infos['1'] = r'''id!1
name!node1
port_id!500507680210C744
port_status!active
port_speed!8Gb
port_id!500507680220C744
port_status!active
port_speed!8Gb
'''
node_infos['2'] = r'''id!2
name!node2
port_id!500507680220C745
port_status!active
port_speed!8Gb
port_id!500507680230C745
port_status!inactive
port_speed!N/A
'''
node_id = kwargs.get('node_id', None)
stdout = node_infos.get(node_id, '')
return stdout, ''
# Print mostly made-up stuff in the correct syntax
def _cmd_lsportip(self, **kwargs):
if self._next_cmd_error['lsportip'] == 'ip_no_config':
self._next_cmd_error['lsportip'] = ''
ip_addr1 = ''
ip_addr2 = ''
gw = ''
else:
ip_addr1 = '1.234.56.78'
ip_addr2 = '1.234.56.79'
gw = '1.234.56.1'
rows = [None] * 17
rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask',
'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC',
'duplex', 'state', 'speed', 'failover']
rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:00', 'Full',
'online', '1Gb/s', 'no']
rows[2] = ['1', '1', 'node1', '', '', '', '', '', '',
'01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes']
rows[3] = ['2', '1', 'node1', '', '', '', '', '', '',
'01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'no']
rows[4] = ['2', '1', 'node1', '', '', '', '', '', '',
'01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'yes']
rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no']
rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes']
rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no']
rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes']
rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:02', 'Full',
'online', '1Gb/s', 'no']
rows[10] = ['1', '2', 'node2', '', '', '', '', '', '',
'01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes']
rows[11] = ['2', '2', 'node2', '', '', '', '', '', '',
'01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', 'no']
rows[12] = ['2', '2', 'node2', '', '', '', '', '', '',
'01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s',
'yes']
rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no']
rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes']
rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no']
rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes']
if self._next_cmd_error['lsportip'] == 'header_mismatch':
rows[0].pop(2)
self._next_cmd_error['lsportip'] = ''
if self._next_cmd_error['lsportip'] == 'remove_field':
for row in rows:
row.pop(1)
self._next_cmd_error['lsportip'] = ''
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lsfabric(self, **kwargs):
host_name = kwargs['host'].strip('\'\"') if 'host' in kwargs else None
target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None
host_infos = []
for hv in self._hosts_list.itervalues():
if (not host_name) or (hv['host_name'] == host_name):
for mv in self._mappings_list.itervalues():
if mv['host'] == hv['host_name']:
if not target_wwpn or target_wwpn in hv['wwpns']:
host_infos.append(hv)
break
if not len(host_infos):
return ('', '')
rows = []
rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name',
'local_wwpn', 'local_port', 'local_nportid', 'state',
'name', 'cluster_name', 'type'])
for host_info in host_infos:
for wwpn in host_info['wwpns']:
rows.append([wwpn, '123456', host_info['id'], 'nodeN',
'AABBCCDDEEFF0011', '1', '0123ABC', 'active',
host_info['host_name'], '', 'host'])
if self._next_cmd_error['lsfabric'] == 'header_mismatch':
rows[0].pop(0)
self._next_cmd_error['lsfabric'] = ''
if self._next_cmd_error['lsfabric'] == 'remove_field':
for row in rows:
row.pop(0)
self._next_cmd_error['lsfabric'] = ''
return self._print_info_cmd(rows=rows, **kwargs)
# Create a vdisk
def _cmd_mkvdisk(self, **kwargs):
# We only save the id/uid, name, and size - all else will be made up
volume_info = {}
volume_info['id'] = self._find_unused_id(self._volumes_list)
volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id']
if 'name' in kwargs:
volume_info['name'] = kwargs['name'].strip('\'\"')
else:
volume_info['name'] = 'vdisk' + volume_info['id']
# Assume size and unit are given, store it in bytes
capacity = int(kwargs['size'])
unit = kwargs['unit']
volume_info['capacity'] = self._convert_units_bytes(capacity, unit)
volume_info['IO_group_id'] = kwargs['iogrp']
volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp']
if 'easytier' in kwargs:
if kwargs['easytier'] == 'on':
volume_info['easy_tier'] = 'on'
else:
volume_info['easy_tier'] = 'off'
if 'rsize' in kwargs:
# Fake numbers
volume_info['used_capacity'] = '786432'
volume_info['real_capacity'] = '21474816'
volume_info['free_capacity'] = '38219264'
if 'warning' in kwargs:
volume_info['warning'] = kwargs['warning'].rstrip('%')
else:
volume_info['warning'] = '80'
if 'autoexpand' in kwargs:
volume_info['autoexpand'] = 'on'
else:
volume_info['autoexpand'] = 'off'
if 'grainsize' in kwargs:
volume_info['grainsize'] = kwargs['grainsize']
else:
volume_info['grainsize'] = '32'
if 'compressed' in kwargs:
volume_info['compressed_copy'] = 'yes'
else:
volume_info['compressed_copy'] = 'no'
else:
volume_info['used_capacity'] = volume_info['capacity']
volume_info['real_capacity'] = volume_info['capacity']
volume_info['free_capacity'] = '0'
volume_info['warning'] = ''
volume_info['autoexpand'] = ''
volume_info['grainsize'] = ''
volume_info['compressed_copy'] = 'no'
vol_cp = {'id': '0',
'status': 'online',
'sync': 'yes',
'primary': 'yes',
'mdisk_grp_id': '1',
'mdisk_grp_name': self._flags['storwize_svc_volpool_name'],
'easy_tier': volume_info['easy_tier'],
'compressed_copy': volume_info['compressed_copy']}
volume_info['copies'] = {'0': vol_cp}
if volume_info['name'] in self._volumes_list:
return self._errors['CMMVC6035E']
else:
self._volumes_list[volume_info['name']] = volume_info
return ('Virtual Disk, id [%s], successfully created' %
(volume_info['id']), '')
# Delete a vdisk
def _cmd_rmvdisk(self, **kwargs):
force = True if 'force' in kwargs else False
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
if not force:
for mapping in self._mappings_list.itervalues():
if mapping['vol'] == vol_name:
return self._errors['CMMVC5840E']
for fcmap in self._fcmappings_list.itervalues():
if ((fcmap['source'] == vol_name) or
(fcmap['target'] == vol_name)):
return self._errors['CMMVC5840E']
del self._volumes_list[vol_name]
return ('', '')
def _cmd_expandvdisksize(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
# Assume unit is gb
if 'size' not in kwargs:
return self._errors['CMMVC5707E']
size = int(kwargs['size'])
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
curr_size = int(self._volumes_list[vol_name]['capacity'])
addition = size * units.Gi
self._volumes_list[vol_name]['capacity'] = str(curr_size + addition)
return ('', '')
def _get_fcmap_info(self, vol_name):
ret_vals = {
'fc_id': '',
'fc_name': '',
'fc_map_count': '0',
}
for fcmap in self._fcmappings_list.itervalues():
if ((fcmap['source'] == vol_name) or
(fcmap['target'] == vol_name)):
ret_vals['fc_id'] = fcmap['id']
ret_vals['fc_name'] = fcmap['name']
ret_vals['fc_map_count'] = '1'
return ret_vals
# List information about vdisks
def _cmd_lsvdisk(self, **kwargs):
rows = []
rows.append(['id', 'name', 'IO_group_id', 'IO_group_name',
'status', 'mdisk_grp_id', 'mdisk_grp_name',
'capacity', 'type', 'FC_id', 'FC_name', 'RC_id',
'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count',
'fast_write_state', 'se_copy_count', 'RC_change'])
for vol in self._volumes_list.itervalues():
if (('filtervalue' not in kwargs) or
(kwargs['filtervalue'] == 'name=' + vol['name']) or
(kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])):
fcmap_info = self._get_fcmap_info(vol['name'])
if 'bytes' in kwargs:
cap = self._convert_bytes_units(vol['capacity'])
else:
cap = vol['capacity']
rows.append([str(vol['id']), vol['name'], vol['IO_group_id'],
vol['IO_group_name'], 'online', '0',
self._flags['storwize_svc_volpool_name'],
cap, 'striped',
fcmap_info['fc_id'], fcmap_info['fc_name'],
'', '', vol['uid'],
fcmap_info['fc_map_count'], '1', 'empty',
'1', 'no'])
if 'obj' not in kwargs:
return self._print_info_cmd(rows=rows, **kwargs)
else:
if kwargs['obj'] not in self._volumes_list:
return self._errors['CMMVC5754E']
vol = self._volumes_list[kwargs['obj']]
fcmap_info = self._get_fcmap_info(vol['name'])
cap = vol['capacity']
cap_u = vol['used_capacity']
cap_r = vol['real_capacity']
cap_f = vol['free_capacity']
if 'bytes' not in kwargs:
for item in [cap, cap_u, cap_r, cap_f]:
item = self._convert_bytes_units(item)
rows = []
rows.append(['id', str(vol['id'])])
rows.append(['name', vol['name']])
rows.append(['IO_group_id', vol['IO_group_id']])
rows.append(['IO_group_name', vol['IO_group_name']])
rows.append(['status', 'online'])
rows.append(['capacity', cap])
rows.append(['formatted', 'no'])
rows.append(['mdisk_id', ''])
rows.append(['mdisk_name', ''])
rows.append(['FC_id', fcmap_info['fc_id']])
rows.append(['FC_name', fcmap_info['fc_name']])
rows.append(['RC_id', ''])
rows.append(['RC_name', ''])
rows.append(['vdisk_UID', vol['uid']])
rows.append(['throttling', '0'])
if self._next_cmd_error['lsvdisk'] == 'blank_pref_node':
rows.append(['preferred_node_id', ''])
self._next_cmd_error['lsvdisk'] = ''
elif self._next_cmd_error['lsvdisk'] == 'no_pref_node':
self._next_cmd_error['lsvdisk'] = ''
else:
rows.append(['preferred_node_id', '1'])
rows.append(['fast_write_state', 'empty'])
rows.append(['cache', 'readwrite'])
rows.append(['udid', ''])
rows.append(['fc_map_count', fcmap_info['fc_map_count']])
rows.append(['sync_rate', '50'])
rows.append(['copy_count', '1'])
rows.append(['se_copy_count', '0'])
rows.append(['mirror_write_priority', 'latency'])
rows.append(['RC_change', 'no'])
for copy in vol['copies'].itervalues():
rows.append(['copy_id', copy['id']])
rows.append(['status', copy['status']])
rows.append(['primary', copy['primary']])
rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
rows.append(['type', 'striped'])
rows.append(['used_capacity', cap_u])
rows.append(['real_capacity', cap_r])
rows.append(['free_capacity', cap_f])
rows.append(['easy_tier', copy['easy_tier']])
rows.append(['compressed_copy', copy['compressed_copy']])
rows.append(['autoexpand', vol['autoexpand']])
rows.append(['warning', vol['warning']])
rows.append(['grainsize', vol['grainsize']])
if 'nohdr' in kwargs:
for index in range(len(rows)):
rows[index] = ' '.join(rows[index][1:])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
def _cmd_lsiogrp(self, **kwargs):
rows = [None] * 6
rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count']
rows[1] = ['0', 'io_grp0', '2', '0', '4']
rows[2] = ['1', 'io_grp1', '2', '0', '4']
rows[3] = ['2', 'io_grp2', '0', '0', '4']
rows[4] = ['3', 'io_grp3', '0', '0', '4']
rows[5] = ['4', 'recovery_io_grp', '0', '0', '0']
return self._print_info_cmd(rows=rows, **kwargs)
def _add_port_to_host(self, host_info, **kwargs):
if 'iscsiname' in kwargs:
added_key = 'iscsi_names'
added_val = kwargs['iscsiname'].strip('\'\"')
elif 'hbawwpn' in kwargs:
added_key = 'wwpns'
added_val = kwargs['hbawwpn'].strip('\'\"')
else:
return self._errors['CMMVC5707E']
host_info[added_key].append(added_val)
for v in self._hosts_list.itervalues():
if v['id'] == host_info['id']:
continue
for port in v[added_key]:
if port == added_val:
return self._errors['CMMVC6581E']
return ('', '')
# Make a host
def _cmd_mkhost(self, **kwargs):
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
if 'name' in kwargs:
host_name = kwargs['name'].strip('\'\"')
else:
host_name = 'host' + str(host_info['id'])
if self._is_invalid_name(host_name):
return self._errors['CMMVC6527E']
if host_name in self._hosts_list:
return self._errors['CMMVC6035E']
host_info['host_name'] = host_name
host_info['iscsi_names'] = []
host_info['wwpns'] = []
out, err = self._add_port_to_host(host_info, **kwargs)
if not len(err):
self._hosts_list[host_name] = host_info
return ('Host, id [%s], successfully created' %
(host_info['id']), '')
else:
return (out, err)
# Add ports to an existing host
def _cmd_addhostport(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
host_info = self._hosts_list[host_name]
return self._add_port_to_host(host_info, **kwargs)
# Change host properties
def _cmd_chhost(self, **kwargs):
if 'chapsecret' not in kwargs:
return self._errors['CMMVC5707E']
secret = kwargs['obj'].strip('\'\"')
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
self._hosts_list[host_name]['chapsecret'] = secret
return ('', '')
# Remove a host
def _cmd_rmhost(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
for v in self._mappings_list.itervalues():
if (v['host'] == host_name):
return self._errors['CMMVC5871E']
del self._hosts_list[host_name]
return ('', '')
# List information about hosts
def _cmd_lshost(self, **kwargs):
if 'obj' not in kwargs:
rows = []
rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
found = False
for host in self._hosts_list.itervalues():
filterstr = 'name=' + host['host_name']
if (('filtervalue' not in kwargs) or
(kwargs['filtervalue'] == filterstr)):
rows.append([host['id'], host['host_name'], '1', '4',
'offline'])
found = True
if found:
return self._print_info_cmd(rows=rows, **kwargs)
else:
return ('', '')
else:
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5754E']
host = self._hosts_list[host_name]
rows = []
rows.append(['id', host['id']])
rows.append(['name', host['host_name']])
rows.append(['port_count', '1'])
rows.append(['type', 'generic'])
rows.append(['mask', '1111'])
rows.append(['iogrp_count', '4'])
rows.append(['status', 'online'])
for port in host['iscsi_names']:
rows.append(['iscsi_name', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'offline'])
for port in host['wwpns']:
rows.append(['WWPN', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'active'])
if 'nohdr' in kwargs:
for index in range(len(rows)):
rows[index] = ' '.join(rows[index][1:])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
# List iSCSI authorization information about hosts
def _cmd_lsiscsiauth(self, **kwargs):
if self._next_cmd_error['lsiscsiauth'] == 'no_info':
self._next_cmd_error['lsiscsiauth'] = ''
return ('', '')
rows = []
rows.append(['type', 'id', 'name', 'iscsi_auth_method',
'iscsi_chap_secret'])
for host in self._hosts_list.itervalues():
method = 'none'
secret = ''
if 'chapsecret' in host:
method = 'chap'
secret = host['chapsecret']
rows.append(['host', host['id'], host['host_name'], method,
secret])
return self._print_info_cmd(rows=rows, **kwargs)
# Create a vdisk-host mapping
def _cmd_mkvdiskhostmap(self, **kwargs):
mapping_info = {}
mapping_info['id'] = self._find_unused_id(self._mappings_list)
if 'host' not in kwargs:
return self._errors['CMMVC5707E']
mapping_info['host'] = kwargs['host'].strip('\'\"')
if 'scsi' not in kwargs:
return self._errors['CMMVC5707E']
mapping_info['lun'] = kwargs['scsi'].strip('\'\"')
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
mapping_info['vol'] = kwargs['obj'].strip('\'\"')
if mapping_info['vol'] not in self._volumes_list:
return self._errors['CMMVC5753E']
if mapping_info['host'] not in self._hosts_list:
return self._errors['CMMVC5754E']
if mapping_info['vol'] in self._mappings_list:
return self._errors['CMMVC6071E']
for v in self._mappings_list.itervalues():
if ((v['host'] == mapping_info['host']) and
(v['lun'] == mapping_info['lun'])):
return self._errors['CMMVC5879E']
for v in self._mappings_list.itervalues():
if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs):
return self._errors['CMMVC6071E']
self._mappings_list[mapping_info['id']] = mapping_info
return ('Virtual Disk to Host map, id [%s], successfully created'
% (mapping_info['id']), '')
# Delete a vdisk-host mapping
def _cmd_rmvdiskhostmap(self, **kwargs):
if 'host' not in kwargs:
return self._errors['CMMVC5707E']
host = kwargs['host'].strip('\'\"')
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol = kwargs['obj'].strip('\'\"')
mapping_ids = []
for v in self._mappings_list.itervalues():
if v['vol'] == vol:
mapping_ids.append(v['id'])
if not mapping_ids:
return self._errors['CMMVC5753E']
this_mapping = None
for mapping_id in mapping_ids:
if self._mappings_list[mapping_id]['host'] == host:
this_mapping = mapping_id
if this_mapping is None:
return self._errors['CMMVC5753E']
del self._mappings_list[this_mapping]
return ('', '')
# List information about host->vdisk mappings
def _cmd_lshostvdiskmap(self, **kwargs):
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5754E']
rows = []
rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
'vdisk_UID'])
for mapping in self._mappings_list.itervalues():
if (host_name == '') or (mapping['host'] == host_name):
volume = self._volumes_list[mapping['vol']]
rows.append([mapping['id'], mapping['host'],
mapping['lun'], volume['id'],
volume['name'], volume['uid']])
return self._print_info_cmd(rows=rows, **kwargs)
# List information about vdisk->host mappings
def _cmd_lsvdiskhostmap(self, **kwargs):
mappings_found = 0
vdisk_name = kwargs['obj']
if vdisk_name not in self._volumes_list:
return self._errors['CMMVC5753E']
rows = []
rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID',
'IO_group_id', 'IO_group_name'])
for mapping in self._mappings_list.itervalues():
if (mapping['vol'] == vdisk_name):
mappings_found += 1
volume = self._volumes_list[mapping['vol']]
host = self._hosts_list[mapping['host']]
rows.append([volume['id'], volume['name'], host['id'],
host['host_name'], volume['uid'],
volume['IO_group_id'], volume['IO_group_name']])
if mappings_found:
return self._print_info_cmd(rows=rows, **kwargs)
else:
return ('', '')
# Create a FlashCopy mapping
def _cmd_mkfcmap(self, **kwargs):
source = ''
target = ''
copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50'
if 'source' not in kwargs:
return self._errors['CMMVC5707E']
source = kwargs['source'].strip('\'\"')
if source not in self._volumes_list:
return self._errors['CMMVC5754E']
if 'target' not in kwargs:
return self._errors['CMMVC5707E']
target = kwargs['target'].strip('\'\"')
if target not in self._volumes_list:
return self._errors['CMMVC5754E']
if source == target:
return self._errors['CMMVC6303E']
if (self._volumes_list[source]['capacity'] !=
self._volumes_list[target]['capacity']):
return self._errors['CMMVC5754E']
fcmap_info = {}
fcmap_info['source'] = source
fcmap_info['target'] = target
fcmap_info['id'] = self._find_unused_id(self._fcmappings_list)
fcmap_info['name'] = 'fcmap' + fcmap_info['id']
fcmap_info['copyrate'] = copyrate
fcmap_info['progress'] = '0'
fcmap_info['autodelete'] = True if 'autodelete' in kwargs else False
fcmap_info['status'] = 'idle_or_copied'
# Add fcmap to consistency group
if 'consistgrp' in kwargs:
consistgrp = kwargs['consistgrp']
# if it is a digit, assume it is a cg id, otherwise a cg name
cg_id = 0
if not consistgrp.isdigit():
for consistgrp_key in self._fcconsistgrp_list.keys():
if (self._fcconsistgrp_list[consistgrp_key]['name']
== consistgrp):
cg_id = consistgrp_key
fcmap_info['consistgrp'] = consistgrp_key
break
else:
if int(consistgrp) in self._fcconsistgrp_list.keys():
cg_id = int(consistgrp)
# If no existing consistgrp id can be found, return a "does not exist" error
if not cg_id:
return self._errors['CMMVC5754E']
fcmap_info['consistgrp'] = cg_id
# Add fcmap to consistgrp
self._fcconsistgrp_list[cg_id]['fcmaps'][fcmap_info['id']] = (
fcmap_info['name'])
self._fc_cg_state_transition('add',
self._fcconsistgrp_list[cg_id])
self._fcmappings_list[fcmap_info['id']] = fcmap_info
return('FlashCopy Mapping, id [' + fcmap_info['id'] +
'], successfully created', '')
def _cmd_prestartfcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
if self._next_cmd_error['prestartfcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['prestartfcmap'] = ''
try:
fcmap = self._fcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('prepare', fcmap)
def _cmd_startfcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
if self._next_cmd_error['startfcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['startfcmap'] = ''
try:
fcmap = self._fcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('start', fcmap)
def _cmd_stopfcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
try:
fcmap = self._fcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('stop', fcmap)
def _cmd_rmfcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
force = True if 'force' in kwargs else False
if self._next_cmd_error['rmfcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['rmfcmap'] = ''
try:
fcmap = self._fcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
function = 'delete_force' if force else 'delete'
ret = self._state_transition(function, fcmap)
if fcmap['status'] == 'end':
del self._fcmappings_list[id_num]
return ret
def _cmd_lsvdiskfcmappings(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
vdisk = kwargs['obj']
rows = []
rows.append(['id', 'name'])
for v in self._fcmappings_list.itervalues():
if v['source'] == vdisk or v['target'] == vdisk:
rows.append([v['id'], v['name']])
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_chfcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
id_num = kwargs['obj']
try:
fcmap = self._fcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
for key in ['name', 'copyrate', 'autodelete']:
if key in kwargs:
fcmap[key] = kwargs[key]
return ('', '')
def _cmd_lsfcmap(self, **kwargs):
rows = []
rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name',
'target_vdisk_id', 'target_vdisk_name', 'group_id',
'group_name', 'status', 'progress', 'copy_rate',
'clean_progress', 'incremental', 'partner_FC_id',
'partner_FC_name', 'restoring', 'start_time',
'rc_controlled'])
# Assume we always get a filtervalue argument
filter_key = kwargs['filtervalue'].split('=')[0]
filter_value = kwargs['filtervalue'].split('=')[1]
to_delete = []
for k, v in self._fcmappings_list.iteritems():
if str(v[filter_key]) == filter_value:
source = self._volumes_list[v['source']]
target = self._volumes_list[v['target']]
self._state_transition('wait', v)
if self._next_cmd_error['lsfcmap'] == 'speed_up':
self._next_cmd_error['lsfcmap'] = ''
curr_state = v['status']
while self._state_transition('wait', v) == ("", ""):
if curr_state == v['status']:
break
curr_state = v['status']
if ((v['status'] == 'idle_or_copied' and v['autodelete'] and
v['progress'] == '100') or (v['status'] == 'end')):
to_delete.append(k)
else:
rows.append([v['id'], v['name'], source['id'],
source['name'], target['id'], target['name'],
'', '', v['status'], v['progress'],
v['copyrate'], '100', 'off', '', '', 'no', '',
'no'])
for d in to_delete:
del self._fcmappings_list[d]
return self._print_info_cmd(rows=rows, **kwargs)
# Create a FlashCopy mapping
def _cmd_mkfcconsistgrp(self, **kwargs):
fcconsistgrp_info = {}
fcconsistgrp_info['id'] = self._find_unused_id(self._fcconsistgrp_list)
if 'name' in kwargs:
fcconsistgrp_info['name'] = kwargs['name'].strip('\'\"')
else:
fcconsistgrp_info['name'] = 'fccstgrp' + fcconsistgrp_info['id']
if 'autodelete' in kwargs:
fcconsistgrp_info['autodelete'] = True
else:
fcconsistgrp_info['autodelete'] = False
fcconsistgrp_info['status'] = 'empty'
fcconsistgrp_info['start_time'] = None
fcconsistgrp_info['fcmaps'] = {}
self._fcconsistgrp_list[fcconsistgrp_info['id']] = fcconsistgrp_info
return('FlashCopy Consistency Group, id [' + fcconsistgrp_info['id'] +
'], successfully created', '')
def _cmd_prestartfcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
cg_id = 0
for cg_id in self._fcconsistgrp_list.keys():
if cg_name == self._fcconsistgrp_list[cg_id]['name']:
break
return self._fc_cg_state_transition('prepare',
self._fcconsistgrp_list[cg_id])
def _cmd_startfcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
cg_id = 0
for cg_id in self._fcconsistgrp_list.keys():
if cg_name == self._fcconsistgrp_list[cg_id]['name']:
break
return self._fc_cg_state_transition('start',
self._fcconsistgrp_list[cg_id])
def _cmd_stopfcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
try:
fcconsistgrps = self._fcconsistgrp_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._fc_cg_state_transition('stop', fcconsistgrps)
def _cmd_rmfcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
force = True if 'force' in kwargs else False
cg_id = 0
for cg_id in self._fcconsistgrp_list.keys():
if cg_name == self._fcconsistgrp_list[cg_id]['name']:
break
if not cg_id:
return self._errors['CMMVC5753E']
fcconsistgrps = self._fcconsistgrp_list[cg_id]
function = 'delete_force' if force else 'delete'
ret = self._fc_cg_state_transition(function, fcconsistgrps)
if fcconsistgrps['status'] == 'end':
del self._fcconsistgrp_list[cg_id]
return ret
def _cmd_lsfcconsistgrp(self, **kwargs):
rows = []
if 'obj' not in kwargs:
rows.append(['id', 'name', 'status', 'start_time'])
for fcconsistgrp in self._fcconsistgrp_list.itervalues():
rows.append([fcconsistgrp['id'],
fcconsistgrp['name'],
fcconsistgrp['status'],
fcconsistgrp['start_time']])
return self._print_info_cmd(rows=rows, **kwargs)
else:
fcconsistgrp = None
cg_id = 0
for cg_id in self._fcconsistgrp_list.keys():
if self._fcconsistgrp_list[cg_id]['name'] == kwargs['obj']:
fcconsistgrp = self._fcconsistgrp_list[cg_id]
rows = []
rows.append(['id', str(cg_id)])
rows.append(['name', fcconsistgrp['name']])
rows.append(['status', fcconsistgrp['status']])
rows.append(['autodelete', str(fcconsistgrp['autodelete'])])
rows.append(['start_time', str(fcconsistgrp['start_time'])])
for fcmap_id in fcconsistgrp['fcmaps'].keys():
rows.append(['FC_mapping_id', str(fcmap_id)])
rows.append(['FC_mapping_name',
fcconsistgrp['fcmaps'][fcmap_id]])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
self._fc_cg_state_transition('wait', fcconsistgrp)
return ('%s' % '\n'.join(rows), '')
def _cmd_migratevdisk(self, **kwargs):
if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs:
return self._errors['CMMVC5707E']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
vdisk = kwargs['vdisk'].strip('\'\"')
if vdisk in self._volumes_list:
curr_mdiskgrp = self._volumes_list
else:
for pool in self._other_pools:
if vdisk in pool:
curr_mdiskgrp = pool
break
else:
return self._errors['CMMVC5754E']
if mdiskgrp == self._flags['storwize_svc_volpool_name']:
tgt_mdiskgrp = self._volumes_list
elif mdiskgrp == 'openstack2':
tgt_mdiskgrp = self._other_pools['openstack2']
elif mdiskgrp == 'openstack3':
tgt_mdiskgrp = self._other_pools['openstack3']
else:
return self._errors['CMMVC5754E']
if curr_mdiskgrp == tgt_mdiskgrp:
return self._errors['CMMVC6430E']
vol = curr_mdiskgrp[vdisk]
tgt_mdiskgrp[vdisk] = vol
del curr_mdiskgrp[vdisk]
return ('', '')
def _cmd_addvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
vol = self._volumes_list[vol_name]
if 'mdiskgrp' not in kwargs:
return self._errors['CMMVC5707E']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
copy_info = {}
copy_info['id'] = self._find_unused_id(vol['copies'])
copy_info['status'] = 'online'
copy_info['sync'] = 'no'
copy_info['primary'] = 'no'
copy_info['mdisk_grp_name'] = mdiskgrp
if mdiskgrp == self._flags['storwize_svc_volpool_name']:
copy_info['mdisk_grp_id'] = '1'
elif mdiskgrp == 'openstack2':
copy_info['mdisk_grp_id'] = '2'
elif mdiskgrp == 'openstack3':
copy_info['mdisk_grp_id'] = '3'
if 'easytier' in kwargs:
if kwargs['easytier'] == 'on':
copy_info['easy_tier'] = 'on'
else:
copy_info['easy_tier'] = 'off'
if 'rsize' in kwargs:
if 'compressed' in kwargs:
copy_info['compressed_copy'] = 'yes'
else:
copy_info['compressed_copy'] = 'no'
vol['copies'][copy_info['id']] = copy_info
return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' %
{'vid': vol['id'], 'cid': copy_info['id']}, '')
def _cmd_lsvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5804E']
name = kwargs['obj']
vol = self._volumes_list[name]
rows = []
rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync',
'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity',
'type', 'se_copy', 'easy_tier', 'easy_tier_status',
'compressed_copy'])
for copy in vol['copies'].itervalues():
rows.append([vol['id'], vol['name'], copy['id'],
copy['status'], copy['sync'], copy['primary'],
copy['mdisk_grp_id'], copy['mdisk_grp_name'],
vol['capacity'], 'striped', 'yes', copy['easy_tier'],
'inactive', copy['compressed_copy']])
if 'copy' not in kwargs:
return self._print_info_cmd(rows=rows, **kwargs)
else:
copy_id = kwargs['copy'].strip('\'\"')
if copy_id not in vol['copies']:
return self._errors['CMMVC6353E']
copy = vol['copies'][copy_id]
rows = []
rows.append(['vdisk_id', vol['id']])
rows.append(['vdisk_name', vol['name']])
rows.append(['capacity', vol['capacity']])
rows.append(['copy_id', copy['id']])
rows.append(['status', copy['status']])
rows.append(['sync', copy['sync']])
copy['sync'] = 'yes'
rows.append(['primary', copy['primary']])
rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
rows.append(['easy_tier', copy['easy_tier']])
rows.append(['easy_tier_status', 'inactive'])
rows.append(['compressed_copy', copy['compressed_copy']])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
def _cmd_rmvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if 'copy' not in kwargs:
return self._errors['CMMVC5707E']
copy_id = kwargs['copy'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
vol = self._volumes_list[vol_name]
if copy_id not in vol['copies']:
return self._errors['CMMVC6353E']
del vol['copies'][copy_id]
return ('', '')
def _cmd_chvdisk(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
vol = self._volumes_list[vol_name]
kwargs.pop('obj')
params = ['name', 'warning', 'udid',
'autoexpand', 'easytier', 'primary']
for key, value in kwargs.iteritems():
if key == 'easytier':
vol['easy_tier'] = value
continue
if key == 'warning':
vol['warning'] = value.rstrip('%')
continue
if key == 'name':
vol['name'] = value
del self._volumes_list[vol_name]
self._volumes_list[value] = vol
if key == 'primary':
if value == '0':
self._volumes_list[vol_name]['copies']['0']['primary']\
= 'yes'
self._volumes_list[vol_name]['copies']['1']['primary']\
= 'no'
elif value == '1':
self._volumes_list[vol_name]['copies']['0']['primary']\
= 'no'
self._volumes_list[vol_name]['copies']['1']['primary']\
= 'yes'
else:
err = self._errors['CMMVC6353E'][1] % {'VALUE': key}
return ('', err)
if key in params:
vol[key] = value
else:
err = self._errors['CMMVC5709E'][1] % {'VALUE': key}
return ('', err)
return ('', '')
def _cmd_movevdisk(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
vol = self._volumes_list[vol_name]
if 'iogrp' not in kwargs:
return self._errors['CMMVC5707E']
iogrp = kwargs['iogrp']
if iogrp.isdigit():
vol['IO_group_id'] = iogrp
vol['IO_group_name'] = 'io_grp%s' % iogrp
else:
vol['IO_group_id'] = iogrp[6:]
vol['IO_group_name'] = iogrp
return ('', '')
def _cmd_addvdiskaccess(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
return ('', '')
def _cmd_rmvdiskaccess(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
return ('', '')
# list vdisk sync progress
def _cmd_lsvdisksyncprogress(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5804E']
name = kwargs['obj']
copy_id = kwargs.get('copy', None)
vol = self._volumes_list[name]
rows = []
rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress',
'estimated_completion_time'])
copy_found = False
for copy in vol['copies'].itervalues():
if not copy_id or copy_id == copy['id']:
copy_found = True
row = [vol['id'], name, copy['id']]
if copy['sync'] == 'yes':
row.extend(['100', ''])
else:
row.extend(['50', '140210115226'])
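                    # Mark the copy as synced so the next query reports 100%.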
copy['sync'] = 'yes'
rows.append(row)
if not copy_found:
return self._errors['CMMVC5804E']
return self._print_info_cmd(rows=rows, **kwargs)
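
    # Build a fake host entry from the connector (iSCSI IQN and/or FC WWPNs).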
def _add_host_to_list(self, connector):
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
host_info['host_name'] = connector['host']
host_info['iscsi_names'] = []
host_info['wwpns'] = []
if 'initiator' in connector:
host_info['iscsi_names'].append(connector['initiator'])
if 'wwpns' in connector:
host_info['wwpns'] = host_info['wwpns'] + connector['wwpns']
self._hosts_list[connector['host']] = host_info
def _host_in_list(self, host_name):
for k in self._hosts_list:
if k.startswith(host_name):
return k
return None
# The main function to run commands on the management simulator
def execute_command(self, cmd, check_exit_code=True):
try:
kwargs = self._cmd_to_dict(cmd)
except IndexError:
return self._errors['CMMVC5707E']
command = kwargs['cmd']
del kwargs['cmd']
func = getattr(self, '_cmd_' + command)
out, err = func(**kwargs)
if (check_exit_code) and (len(err) != 0):
raise processutils.ProcessExecutionError(exit_code=1,
stdout=out,
stderr=err,
cmd=' '.join(cmd))
return (out, err)
    # After calling this function, the next call to the specified command
    # will result in the error specified.
def error_injection(self, cmd, error):
self._next_cmd_error[cmd] = error
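
# Driver subclass that routes CLI commands to the in-memory simulator
# instead of opening a real SSH connection.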
class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver):
def __init__(self, *args, **kwargs):
super(StorwizeSVCFakeDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
try:
LOG.debug('Run CLI command: %s' % cmd)
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
(stdout, stderr) = ret
LOG.debug('CLI output:\n stdout: %(stdout)s\n stderr: '
'%(stderr)s' % {'stdout': stdout, 'stderr': stderr})
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.debug('CLI Exception output:\n stdout: %(out)s\n '
'stderr: %(err)s' % {'out': e.stdout,
'err': e.stderr})
return ret
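
# These tests run against the management simulator when USESIM is True;
# setting it to False exercises a real array using the flags in setUp().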
class StorwizeSVCDriverTestCase(test.TestCase):
@mock.patch.object(time, 'sleep')
def setUp(self, mock_sleep):
super(StorwizeSVCDriverTestCase, self).setUp()
self.USESIM = True
if self.USESIM:
self.driver = StorwizeSVCFakeDriver(
configuration=conf.Configuration(None))
self._def_flags = {'san_ip': 'hostname',
'san_login': 'user',
'san_password': 'pass',
'storwize_svc_volpool_name': 'openstack',
'storwize_svc_flashcopy_timeout': 20,
# Test ignore capitalization
'storwize_svc_connection_protocol': 'iScSi',
'storwize_svc_multipath_enabled': False,
'storwize_svc_allow_tenant_qos': True}
wwpns = [str(random.randint(0, 9999999999999999)).zfill(16),
str(random.randint(0, 9999999999999999)).zfill(16)]
initiator = 'test.initiator.%s' % str(random.randint(10000, 99999))
self._connector = {'ip': '1.234.56.78',
'host': 'storwize-svc-test',
'wwpns': wwpns,
'initiator': initiator}
self.sim = StorwizeSVCManagementSimulator('openstack')
self.driver.set_fake_storage(self.sim)
self.ctxt = context.get_admin_context()
else:
self.driver = storwize_svc.StorwizeSVCDriver(
configuration=conf.Configuration(None))
self._def_flags = {'san_ip': '1.111.11.11',
'san_login': 'user',
'san_password': 'password',
'storwize_svc_volpool_name': 'openstack',
# Test ignore capitalization
'storwize_svc_connection_protocol': 'iScSi',
'storwize_svc_multipath_enabled': False,
'storwize_svc_allow_tenant_qos': True,
'ssh_conn_timeout': 0}
config_group = self.driver.configuration.config_group
self.driver.configuration.set_override('rootwrap_config',
'/etc/cinder/rootwrap.conf',
config_group)
self._connector = utils.brick_get_connector_properties()
self._reset_flags()
self.ctxt = context.get_admin_context()
db_driver = self.driver.configuration.db_driver
self.db = importutils.import_module(db_driver)
self.driver.db = self.db
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.driver._helpers.check_fcmapping_interval = 0
def _set_flag(self, flag, value):
group = self.driver.configuration.config_group
self.driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
self._set_flag(k, v)
def _assert_vol_exists(self, name, exists):
is_vol_defined = self.driver._helpers.is_vdisk_defined(name)
self.assertEqual(is_vol_defined, exists)
def test_storwize_svc_connectivity(self):
# Make sure we detect if the pool doesn't exist
no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999)
self._set_flag('storwize_svc_volpool_name', no_exist_pool)
self.assertRaises(exception.InvalidInput,
self.driver.do_setup, None)
self._reset_flags()
# Check the case where the user didn't configure IP addresses
# as well as receiving unexpected results from the storage
if self.USESIM:
self.sim.error_injection('lsnodecanister', 'header_mismatch')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
self.sim.error_injection('lsnodecanister', 'remove_field')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
self.sim.error_injection('lsportip', 'header_mismatch')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
self.sim.error_injection('lsportip', 'remove_field')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
# Check with bad parameters
self._set_flag('san_ip', '')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('san_password', None)
self._set_flag('san_private_key', None)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('storwize_svc_vol_rsize', 101)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('storwize_svc_vol_warning', 101)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('storwize_svc_vol_grainsize', 42)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('storwize_svc_flashcopy_timeout', 601)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('storwize_svc_vol_compression', True)
self._set_flag('storwize_svc_vol_rsize', -1)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('storwize_svc_connection_protocol', 'foo')
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
self._set_flag('storwize_svc_vol_iogrp', 5)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
if self.USESIM:
self.sim.error_injection('lslicense', 'no_compression')
self._set_flag('storwize_svc_vol_compression', True)
self.driver.do_setup(None)
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
self._reset_flags()
# Finally, check with good parameters
self.driver.do_setup(None)
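
    # Return fake snapshot info when vol_name is given, otherwise fake
    # volume info.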
def _generate_vol_info(self, vol_name, vol_id):
rand_id = str(random.randint(10000, 99999))
if vol_name:
return {'name': 'snap_volume%s' % rand_id,
'volume_name': vol_name,
'id': rand_id,
'volume_id': vol_id,
'volume_size': 10,
'mdisk_grp_name': 'openstack'}
else:
return {'name': 'test_volume%s' % rand_id,
'size': 10,
'id': '%s' % rand_id,
'volume_type_id': None,
'mdisk_grp_name': 'openstack'}
def _create_volume(self, **kwargs):
vol = testutils.create_volume(self.ctxt, **kwargs)
self.driver.create_volume(vol)
return vol
def _delete_volume(self, volume):
self.driver.delete_volume(volume)
self.db.volume_destroy(self.ctxt, volume['id'])
def _create_consistencygroup_in_db(self, **kwargs):
cg = testutils.create_consistencygroup(self.ctxt, **kwargs)
return cg
def _create_cgsnapshot_in_db(self, cg_id, **kwargs):
cg_snapshot = testutils.create_cgsnapshot(self.ctxt,
                                                  consistencygroup_id=cg_id,
**kwargs)
cg_id = cg_snapshot['consistencygroup_id']
volumes = self.db.volume_get_all_by_group(self.ctxt.elevated(), cg_id)
if not volumes:
msg = _("Consistency group is empty. No cgsnapshot "
"will be created.")
raise exception.InvalidConsistencyGroup(reason=msg)
for volume in volumes:
testutils.create_snapshot(self.ctxt,
volume['id'],
cg_snapshot['id'],
cg_snapshot['name'],
cg_snapshot['id'],
"creating")
return cg_snapshot
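
    # Create a volume using a temporary volume type built from opts, capture
    # its vdisk attributes, then clean up the volume and the type.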
def _create_test_vol(self, opts):
ctxt = testutils.get_test_admin_context()
type_ref = volume_types.create(ctxt, 'testtype', opts)
volume = self._generate_vol_info(None, None)
type_id = type_ref['id']
type_ref = volume_types.get_volume_type(ctxt, type_id)
volume['volume_type_id'] = type_id
volume['volume_type'] = type_ref
self.driver.create_volume(volume)
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.driver.delete_volume(volume)
volume_types.destroy(ctxt, type_ref['id'])
return attrs
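
    # Default vdisk parameters, used as the expected baseline in the QoS and
    # retype tests.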
def _get_default_opts(self):
opt = {'rsize': 2,
'warning': 0,
'autoexpand': True,
'grainsize': 256,
'compression': False,
'easytier': True,
'protocol': 'iSCSI',
'multipath': False,
'iogrp': 0,
'qos': None,
'replication': False,
'stretched_cluster': None}
return opt
@mock.patch.object(helpers.StorwizeHelpers, 'add_vdisk_qos')
@mock.patch.object(storwize_svc.StorwizeSVCDriver, '_get_vdisk_params')
def test_storwize_svc_create_volume_with_qos(self, get_vdisk_params,
add_vdisk_qos):
vol = testutils.create_volume(self.ctxt)
fake_opts = self._get_default_opts()
# If the qos is empty, chvdisk should not be called
# for create_volume.
get_vdisk_params.return_value = fake_opts
self.driver.create_volume(vol)
self._assert_vol_exists(vol['name'], True)
self.assertFalse(add_vdisk_qos.called)
self.driver.delete_volume(vol)
# If the qos is not empty, chvdisk should be called
# for create_volume.
fake_opts['qos'] = {'IOThrottling': 5000}
get_vdisk_params.return_value = fake_opts
self.driver.create_volume(vol)
self._assert_vol_exists(vol['name'], True)
add_vdisk_qos.assert_called_once_with(vol['name'], fake_opts['qos'])
self.driver.delete_volume(vol)
self._assert_vol_exists(vol['name'], False)
def test_storwize_svc_snapshots(self):
vol1 = self._create_volume()
snap1 = self._generate_vol_info(vol1['name'], vol1['id'])
# Test timeout and volume cleanup
self._set_flag('storwize_svc_flashcopy_timeout', 1)
self.assertRaises(exception.VolumeDriverException,
self.driver.create_snapshot, snap1)
self._assert_vol_exists(snap1['name'], False)
self._reset_flags()
# Test prestartfcmap failing
with mock.patch.object(ssh.StorwizeSSH, 'prestartfcmap') as prestart:
prestart.side_effect = exception.VolumeBackendAPIException
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snap1)
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
self.sim.error_injection('startfcmap', 'bad_id')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snap1)
self._assert_vol_exists(snap1['name'], False)
self.sim.error_injection('prestartfcmap', 'bad_id')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snap1)
self._assert_vol_exists(snap1['name'], False)
# Test successful snapshot
self.driver.create_snapshot(snap1)
self._assert_vol_exists(snap1['name'], True)
        # Try to create a snapshot from a non-existing volume - should fail
snap_novol = self._generate_vol_info('undefined-vol', '12345')
self.assertRaises(exception.VolumeDriverException,
self.driver.create_snapshot,
snap_novol)
# We support deleting a volume that has snapshots, so delete the volume
# first
self.driver.delete_volume(vol1)
self.driver.delete_snapshot(snap1)
def test_storwize_svc_create_volfromsnap_clone(self):
vol1 = self._create_volume()
snap1 = self._generate_vol_info(vol1['name'], vol1['id'])
self.driver.create_snapshot(snap1)
vol2 = self._generate_vol_info(None, None)
vol3 = self._generate_vol_info(None, None)
# Try to create a volume from a non-existing snapshot
snap_novol = self._generate_vol_info('undefined-vol', '12345')
vol_novol = self._generate_vol_info(None, None)
self.assertRaises(exception.VolumeDriverException,
self.driver.create_volume_from_snapshot,
vol_novol,
snap_novol)
# Fail the snapshot
with mock.patch.object(ssh.StorwizeSSH, 'prestartfcmap') as prestart:
prestart.side_effect = exception.VolumeBackendAPIException
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
vol2, snap1)
self._assert_vol_exists(vol2['name'], False)
# Try to create where source size != target size
vol2['size'] += 1
self.assertRaises(exception.InvalidInput,
self.driver.create_volume_from_snapshot,
vol2, snap1)
self._assert_vol_exists(vol2['name'], False)
vol2['size'] -= 1
# Succeed
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
self.driver.create_volume_from_snapshot(vol2, snap1)
self._assert_vol_exists(vol2['name'], True)
# Try to clone where source size != target size
vol3['size'] += 1
self.assertRaises(exception.InvalidInput,
self.driver.create_cloned_volume,
vol3, vol2)
self._assert_vol_exists(vol3['name'], False)
vol3['size'] -= 1
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
self.driver.create_cloned_volume(vol3, vol2)
self._assert_vol_exists(vol3['name'], True)
# Delete in the 'opposite' order to make sure it works
self.driver.delete_volume(vol3)
self._assert_vol_exists(vol3['name'], False)
self.driver.delete_volume(vol2)
self._assert_vol_exists(vol2['name'], False)
self.driver.delete_snapshot(snap1)
self._assert_vol_exists(snap1['name'], False)
self.driver.delete_volume(vol1)
self._assert_vol_exists(vol1['name'], False)
@mock.patch.object(helpers.StorwizeHelpers, 'add_vdisk_qos')
def test_storwize_svc_create_volfromsnap_clone_with_qos(self,
add_vdisk_qos):
vol1 = self._create_volume()
snap1 = self._generate_vol_info(vol1['name'], vol1['id'])
self.driver.create_snapshot(snap1)
vol2 = self._generate_vol_info(None, None)
vol3 = self._generate_vol_info(None, None)
fake_opts = self._get_default_opts()
# Succeed
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
# If the qos is empty, chvdisk should not be called
# for create_volume_from_snapshot.
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
get_vdisk_params.return_value = fake_opts
self.driver.create_volume_from_snapshot(vol2, snap1)
self._assert_vol_exists(vol2['name'], True)
self.assertFalse(add_vdisk_qos.called)
self.driver.delete_volume(vol2)
# If the qos is not empty, chvdisk should be called
# for create_volume_from_snapshot.
fake_opts['qos'] = {'IOThrottling': 5000}
get_vdisk_params.return_value = fake_opts
self.driver.create_volume_from_snapshot(vol2, snap1)
self._assert_vol_exists(vol2['name'], True)
add_vdisk_qos.assert_called_once_with(vol2['name'],
fake_opts['qos'])
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
# If the qos is empty, chvdisk should not be called
# for create_volume_from_snapshot.
add_vdisk_qos.reset_mock()
fake_opts['qos'] = None
get_vdisk_params.return_value = fake_opts
self.driver.create_cloned_volume(vol3, vol2)
self._assert_vol_exists(vol3['name'], True)
self.assertFalse(add_vdisk_qos.called)
self.driver.delete_volume(vol3)
# If the qos is not empty, chvdisk should be called
# for create_volume_from_snapshot.
fake_opts['qos'] = {'IOThrottling': 5000}
get_vdisk_params.return_value = fake_opts
self.driver.create_cloned_volume(vol3, vol2)
self._assert_vol_exists(vol3['name'], True)
add_vdisk_qos.assert_called_once_with(vol3['name'],
fake_opts['qos'])
# Delete in the 'opposite' order to make sure it works
self.driver.delete_volume(vol3)
self._assert_vol_exists(vol3['name'], False)
self.driver.delete_volume(vol2)
self._assert_vol_exists(vol2['name'], False)
self.driver.delete_snapshot(snap1)
self._assert_vol_exists(snap1['name'], False)
self.driver.delete_volume(vol1)
self._assert_vol_exists(vol1['name'], False)
def test_storwize_svc_volumes(self):
# Create a first volume
volume = self._generate_vol_info(None, None)
self.driver.create_volume(volume)
self.driver.ensure_export(None, volume)
# Do nothing
self.driver.create_export(None, volume)
self.driver.remove_export(None, volume)
# Make sure volume attributes are as they should be
attributes = self.driver._helpers.get_vdisk_attributes(volume['name'])
attr_size = float(attributes['capacity']) / units.Gi # bytes to GB
self.assertEqual(attr_size, float(volume['size']))
pool = self.driver.configuration.local_conf.storwize_svc_volpool_name
self.assertEqual(attributes['mdisk_grp_name'], pool)
# Try to create the volume again (should fail)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
volume)
# Try to delete a volume that doesn't exist (should not fail)
vol_no_exist = {'name': 'i_dont_exist',
'id': '111111'}
self.driver.delete_volume(vol_no_exist)
# Ensure export for volume that doesn't exist (should not fail)
self.driver.ensure_export(None, vol_no_exist)
# Delete the volume
self.driver.delete_volume(volume)
def test_storwize_svc_volume_params(self):
# Option test matrix
# Option Value Covered by test #
# rsize -1 1
# rsize 2 2,3
# warning 0 2
# warning 80 3
# autoexpand True 2
# autoexpand False 3
# grainsize 32 2
# grainsize 256 3
# compression True 4
# compression False 2,3
# easytier True 1,3
# easytier False 2
# iogrp 0 1
# iogrp 1 2
opts_list = []
chck_list = []
opts_list.append({'rsize': -1, 'easytier': True, 'iogrp': 0})
chck_list.append({'free_capacity': '0', 'easy_tier': 'on',
'IO_group_id': '0'})
test_iogrp = 1 if self.USESIM else 0
opts_list.append({'rsize': 2, 'compression': False, 'warning': 0,
'autoexpand': True, 'grainsize': 32,
'easytier': False, 'iogrp': test_iogrp})
chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no',
'warning': '0', 'autoexpand': 'on',
'grainsize': '32', 'easy_tier': 'off',
'IO_group_id': str(test_iogrp)})
opts_list.append({'rsize': 2, 'compression': False, 'warning': 80,
'autoexpand': False, 'grainsize': 256,
'easytier': True})
chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no',
'warning': '80', 'autoexpand': 'off',
'grainsize': '256', 'easy_tier': 'on'})
opts_list.append({'rsize': 2, 'compression': True})
chck_list.append({'-free_capacity': '0',
'compressed_copy': 'yes'})
for idx in range(len(opts_list)):
attrs = self._create_test_vol(opts_list[idx])
for k, v in chck_list[idx].iteritems():
try:
if k[0] == '-':
k = k[1:]
self.assertNotEqual(attrs[k], v)
else:
self.assertEqual(attrs[k], v)
except processutils.ProcessExecutionError as e:
if 'CMMVC7050E' not in e.stderr:
raise
def test_storwize_svc_unicode_host_and_volume_names(self):
        # We'll check with iSCSI only - nothing protocol-dependent here
self._set_flag('storwize_svc_connection_protocol', 'iSCSI')
self.driver.do_setup(None)
rand_id = random.randint(10000, 99999)
volume1 = {'name': u'unicode1_volume%s' % rand_id,
'size': 2,
'id': 1,
'volume_type_id': None}
self.driver.create_volume(volume1)
self._assert_vol_exists(volume1['name'], True)
self.assertRaises(exception.VolumeDriverException,
self.driver._helpers.create_host,
{'host': 12345})
        # Add a host first to make life interesting (this host and
        # conn['host'] should be translated to the same prefix, and the
        # initiators should differentiate them)
tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id,
'ip': '10.10.10.10',
'host': u'unicode.foo}.bar{.baz-%s' % rand_id}
self.driver._helpers.create_host(tmpconn1)
# Add a host with a different prefix
tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id,
'ip': '10.10.10.11',
'host': u'unicode.hello.world-%s' % rand_id}
self.driver._helpers.create_host(tmpconn2)
conn = {'initiator': u'unicode:initiator3.%s' % rand_id,
'ip': '10.10.10.12',
'host': u'unicode.foo}.bar}.baz-%s' % rand_id}
self.driver.initialize_connection(volume1, conn)
host_name = self.driver._helpers.get_host_from_connector(conn)
self.assertIsNotNone(host_name)
self.driver.terminate_connection(volume1, conn)
host_name = self.driver._helpers.get_host_from_connector(conn)
self.assertIsNone(host_name)
self.driver.delete_volume(volume1)
# Clean up temporary hosts
for tmpconn in [tmpconn1, tmpconn2]:
host_name = self.driver._helpers.get_host_from_connector(tmpconn)
self.assertIsNotNone(host_name)
self.driver._helpers.delete_host(host_name)
def test_storwize_svc_validate_connector(self):
conn_neither = {'host': 'host'}
conn_iscsi = {'host': 'host', 'initiator': 'foo'}
conn_fc = {'host': 'host', 'wwpns': 'bar'}
conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
self.driver._state['enabled_protocols'] = set(['iSCSI'])
self.driver.validate_connector(conn_iscsi)
self.driver.validate_connector(conn_both)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_fc)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_neither)
self.driver._state['enabled_protocols'] = set(['FC'])
self.driver.validate_connector(conn_fc)
self.driver.validate_connector(conn_both)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_iscsi)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_neither)
self.driver._state['enabled_protocols'] = set(['iSCSI', 'FC'])
self.driver.validate_connector(conn_iscsi)
self.driver.validate_connector(conn_fc)
self.driver.validate_connector(conn_both)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_neither)
def test_storwize_svc_host_maps(self):
# Create two volumes to be used in mappings
ctxt = context.get_admin_context()
volume1 = self._generate_vol_info(None, None)
self.driver.create_volume(volume1)
volume2 = self._generate_vol_info(None, None)
self.driver.create_volume(volume2)
        # Create volume types for the protocols we will test
types = {}
for protocol in ['FC', 'iSCSI']:
opts = {'storage_protocol': '<in> ' + protocol}
types[protocol] = volume_types.create(ctxt, protocol, opts)
expected = {'FC': {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': 0,
'target_wwn': 'AABBCCDDEEFF0011',
'target_discovered': False}},
'iSCSI': {'driver_volume_type': 'iscsi',
'data': {'target_discovered': False,
'target_iqn':
'iqn.1982-01.com.ibm:1234.sim.node1',
'target_portal': '1.234.56.78:3260',
'target_lun': 0,
'auth_method': 'CHAP',
'discovery_auth_method': 'CHAP'}}}
for protocol in ['FC', 'iSCSI']:
volume1['volume_type_id'] = types[protocol]['id']
volume2['volume_type_id'] = types[protocol]['id']
# Check case where no hosts exist
if self.USESIM:
ret = self.driver._helpers.get_host_from_connector(
self._connector)
self.assertIsNone(ret)
# Make sure that the volumes have been created
self._assert_vol_exists(volume1['name'], True)
self._assert_vol_exists(volume2['name'], True)
# Initialize connection from the first volume to a host
ret = self.driver.initialize_connection(volume1, self._connector)
self.assertEqual(ret['driver_volume_type'],
expected[protocol]['driver_volume_type'])
for k, v in expected[protocol]['data'].iteritems():
self.assertEqual(ret['data'][k], v)
# Initialize again, should notice it and do nothing
ret = self.driver.initialize_connection(volume1, self._connector)
self.assertEqual(ret['driver_volume_type'],
expected[protocol]['driver_volume_type'])
for k, v in expected[protocol]['data'].iteritems():
self.assertEqual(ret['data'][k], v)
# Try to delete the 1st volume (should fail because it is mapped)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
volume1)
# Check bad output from lsfabric for the 2nd volume
if protocol == 'FC' and self.USESIM:
for error in ['remove_field', 'header_mismatch']:
self.sim.error_injection('lsfabric', error)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume2, self._connector)
                # with storwize_svc_npiv_compatibility_mode set to True,
                # lsfabric can return [] and initialize_connection will
                # still complete successfully
with mock.patch.object(helpers.StorwizeHelpers,
'get_conn_fc_wwpns') as conn_fc_wwpns:
conn_fc_wwpns.return_value = []
self._set_flag('storwize_svc_npiv_compatibility_mode',
True)
expected_fc_npiv = {
'driver_volume_type': 'fibre_channel',
'data': {'target_lun': 1,
'target_wwn': '500507680220C744',
'target_discovered': False}}
ret = self.driver.initialize_connection(volume2,
self._connector)
self.assertEqual(
ret['driver_volume_type'],
expected_fc_npiv['driver_volume_type'])
for k, v in expected_fc_npiv['data'].iteritems():
self.assertEqual(ret['data'][k], v)
self._set_flag('storwize_svc_npiv_compatibility_mode',
False)
self.driver.terminate_connection(volume1, self._connector)
# for npiv compatibility test case, we need to terminate connection
# to the 2nd volume
if protocol == 'FC' and self.USESIM:
self.driver.terminate_connection(volume2, self._connector)
if self.USESIM:
ret = self.driver._helpers.get_host_from_connector(
self._connector)
self.assertIsNone(ret)
# Check cases with no auth set for host
if self.USESIM:
for auth_enabled in [True, False]:
for host_exists in ['yes-auth', 'yes-noauth', 'no']:
self._set_flag('storwize_svc_iscsi_chap_enabled',
auth_enabled)
case = 'en' + str(auth_enabled) + 'ex' + str(host_exists)
conn_na = {'initiator': 'test:init:%s' %
random.randint(10000, 99999),
'ip': '11.11.11.11',
'host': 'host-%s' % case}
if host_exists.startswith('yes'):
self.sim._add_host_to_list(conn_na)
if host_exists == 'yes-auth':
kwargs = {'chapsecret': 'foo',
'obj': conn_na['host']}
self.sim._cmd_chhost(**kwargs)
volume1['volume_type_id'] = types['iSCSI']['id']
init_ret = self.driver.initialize_connection(volume1,
conn_na)
host_name = self.sim._host_in_list(conn_na['host'])
chap_ret = self.driver._helpers.get_chap_secret_for_host(
host_name)
if auth_enabled or host_exists == 'yes-auth':
self.assertIn('auth_password', init_ret['data'])
self.assertIsNotNone(chap_ret)
else:
self.assertNotIn('auth_password', init_ret['data'])
self.assertIsNone(chap_ret)
self.driver.terminate_connection(volume1, conn_na)
self._set_flag('storwize_svc_iscsi_chap_enabled', True)
# Test no preferred node
if self.USESIM:
self.sim.error_injection('lsvdisk', 'no_pref_node')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume1, self._connector)
# Initialize connection from the second volume to the host with no
# preferred node set if in simulation mode, otherwise, just
# another initialize connection.
if self.USESIM:
self.sim.error_injection('lsvdisk', 'blank_pref_node')
self.driver.initialize_connection(volume2, self._connector)
# Try to remove connection from host that doesn't exist (should fail)
conn_no_exist = self._connector.copy()
conn_no_exist['initiator'] = 'i_dont_exist'
conn_no_exist['wwpns'] = ['0000000000000000']
self.assertRaises(exception.VolumeDriverException,
self.driver.terminate_connection,
volume1,
conn_no_exist)
# Try to remove connection from volume that isn't mapped (should print
# message but NOT fail)
unmapped_vol = self._generate_vol_info(None, None)
self.driver.create_volume(unmapped_vol)
self.driver.terminate_connection(unmapped_vol, self._connector)
self.driver.delete_volume(unmapped_vol)
# Remove the mapping from the 1st volume and delete it
self.driver.terminate_connection(volume1, self._connector)
self.driver.delete_volume(volume1)
self._assert_vol_exists(volume1['name'], False)
# Make sure our host still exists
host_name = self.driver._helpers.get_host_from_connector(
self._connector)
self.assertIsNotNone(host_name)
# Remove the mapping from the 2nd volume. The host should
# be automatically removed because there are no more mappings.
self.driver.terminate_connection(volume2, self._connector)
# Check if we successfully terminate connections when the host is not
# specified (see bug #1244257)
fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
self.driver.initialize_connection(volume2, self._connector)
host_name = self.driver._helpers.get_host_from_connector(
self._connector)
self.assertIsNotNone(host_name)
self.driver.terminate_connection(volume2, fake_conn)
host_name = self.driver._helpers.get_host_from_connector(
self._connector)
self.assertIsNone(host_name)
self.driver.delete_volume(volume2)
self._assert_vol_exists(volume2['name'], False)
# Delete volume types that we created
for protocol in ['FC', 'iSCSI']:
volume_types.destroy(ctxt, types[protocol]['id'])
# Check if our host still exists (it should not)
if self.USESIM:
ret = self.driver._helpers.get_host_from_connector(self._connector)
self.assertIsNone(ret)
def test_storwize_svc_multi_host_maps(self):
# We can't test connecting to multiple hosts from a single host when
# using real storage
if not self.USESIM:
return
# Create a volume to be used in mappings
ctxt = context.get_admin_context()
volume = self._generate_vol_info(None, None)
self.driver.create_volume(volume)
# Create volume types for protocols
types = {}
for protocol in ['FC', 'iSCSI']:
opts = {'storage_protocol': '<in> ' + protocol}
types[protocol] = volume_types.create(ctxt, protocol, opts)
# Create a connector for the second 'host'
wwpns = [str(random.randint(0, 9999999999999999)).zfill(16),
str(random.randint(0, 9999999999999999)).zfill(16)]
initiator = 'test.initiator.%s' % str(random.randint(10000, 99999))
conn2 = {'ip': '1.234.56.79',
'host': 'storwize-svc-test2',
'wwpns': wwpns,
'initiator': initiator}
for protocol in ['FC', 'iSCSI']:
volume['volume_type_id'] = types[protocol]['id']
# Make sure that the volume has been created
self._assert_vol_exists(volume['name'], True)
self.driver.initialize_connection(volume, self._connector)
self._set_flag('storwize_svc_multihostmap_enabled', False)
self.assertRaises(exception.CinderException,
self.driver.initialize_connection, volume, conn2)
self._set_flag('storwize_svc_multihostmap_enabled', True)
self.driver.initialize_connection(volume, conn2)
self.driver.terminate_connection(volume, conn2)
self.driver.terminate_connection(volume, self._connector)
def test_storwize_svc_delete_volume_snapshots(self):
# Create a volume with two snapshots
master = self._create_volume()
# Fail creating a snapshot - will force delete the snapshot
if self.USESIM and False:
snap = self._generate_vol_info(master['name'], master['id'])
self.sim.error_injection('startfcmap', 'bad_id')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, snap)
self._assert_vol_exists(snap['name'], False)
# Delete a snapshot
snap = self._generate_vol_info(master['name'], master['id'])
self.driver.create_snapshot(snap)
self._assert_vol_exists(snap['name'], True)
self.driver.delete_snapshot(snap)
self._assert_vol_exists(snap['name'], False)
# Delete a volume with snapshots (regular)
snap = self._generate_vol_info(master['name'], master['id'])
self.driver.create_snapshot(snap)
self._assert_vol_exists(snap['name'], True)
self.driver.delete_volume(master)
self._assert_vol_exists(master['name'], False)
# Fail create volume from snapshot - will force delete the volume
if self.USESIM:
volfs = self._generate_vol_info(None, None)
self.sim.error_injection('startfcmap', 'bad_id')
self.sim.error_injection('lsfcmap', 'speed_up')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
volfs, snap)
self._assert_vol_exists(volfs['name'], False)
# Create volume from snapshot and delete it
volfs = self._generate_vol_info(None, None)
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
self.driver.create_volume_from_snapshot(volfs, snap)
self._assert_vol_exists(volfs['name'], True)
self.driver.delete_volume(volfs)
self._assert_vol_exists(volfs['name'], False)
# Create volume from snapshot and delete the snapshot
volfs = self._generate_vol_info(None, None)
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
self.driver.create_volume_from_snapshot(volfs, snap)
self.driver.delete_snapshot(snap)
self._assert_vol_exists(snap['name'], False)
# Fail create clone - will force delete the target volume
if self.USESIM:
clone = self._generate_vol_info(None, None)
self.sim.error_injection('startfcmap', 'bad_id')
self.sim.error_injection('lsfcmap', 'speed_up')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
clone, volfs)
self._assert_vol_exists(clone['name'], False)
# Create the clone, delete the source and target
clone = self._generate_vol_info(None, None)
if self.USESIM:
self.sim.error_injection('lsfcmap', 'speed_up')
self.driver.create_cloned_volume(clone, volfs)
self._assert_vol_exists(clone['name'], True)
self.driver.delete_volume(volfs)
self._assert_vol_exists(volfs['name'], False)
self.driver.delete_volume(clone)
self._assert_vol_exists(clone['name'], False)
def test_storwize_svc_get_volume_stats(self):
self._set_flag('reserved_percentage', 25)
stats = self.driver.get_volume_stats()
self.assertLessEqual(stats['free_capacity_gb'],
stats['total_capacity_gb'])
self.assertEqual(stats['reserved_percentage'], 25)
pool = self.driver.configuration.local_conf.storwize_svc_volpool_name
if self.USESIM:
expected = 'storwize-svc-sim_' + pool
self.assertEqual(stats['volume_backend_name'], expected)
self.assertAlmostEqual(stats['total_capacity_gb'], 3328.0)
self.assertAlmostEqual(stats['free_capacity_gb'], 3287.5)
def test_storwize_svc_extend_volume(self):
volume = self._create_volume()
self.driver.extend_volume(volume, '13')
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
vol_size = int(attrs['capacity']) / units.Gi
self.assertAlmostEqual(vol_size, 13)
snap = self._generate_vol_info(volume['name'], volume['id'])
self.driver.create_snapshot(snap)
self._assert_vol_exists(snap['name'], True)
self.assertRaises(exception.VolumeDriverException,
self.driver.extend_volume, volume, '16')
self.driver.delete_snapshot(snap)
self.driver.delete_volume(volume)
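
    # Call migrate_volume with the given host capabilities and verify the
    # expected result.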
def _check_loc_info(self, capabilities, expected):
host = {'host': 'foo', 'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1}
ctxt = context.get_admin_context()
moved, model_update = self.driver.migrate_volume(ctxt, vol, host)
self.assertEqual(moved, expected['moved'])
self.assertEqual(model_update, expected['model_update'])
def test_storwize_svc_migrate_bad_loc_info(self):
self._check_loc_info({}, {'moved': False, 'model_update': None})
cap = {'location_info': 'foo'}
self._check_loc_info(cap, {'moved': False, 'model_update': None})
cap = {'location_info': 'FooDriver:foo:bar'}
self._check_loc_info(cap, {'moved': False, 'model_update': None})
cap = {'location_info': 'StorwizeSVCDriver:foo:bar'}
self._check_loc_info(cap, {'moved': False, 'model_update': None})
def test_storwize_svc_volume_migrate(self):
# Make sure we don't call migrate_volume_vdiskcopy
self.driver.do_setup(None)
loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
':openstack2')
cap = {'location_info': loc, 'extent_size': '256'}
host = {'host': 'foo', 'capabilities': cap}
ctxt = context.get_admin_context()
volume = self._create_volume()
volume['volume_type_id'] = None
self.driver.migrate_volume(ctxt, volume, host)
self._delete_volume(volume)
def test_storwize_svc_get_vdisk_params(self):
self.driver.do_setup(None)
fake_qos = {'qos:IOThrottling': 5000}
expected_qos = {'IOThrottling': 5000}
fake_opts = self._get_default_opts()
        # The parameters returned should be the same as the default
        # options if the QoS is empty.
vol_type_empty_qos = self._create_volume_type_qos(True, None)
type_id = vol_type_empty_qos['id']
params = self.driver._get_vdisk_params(type_id,
volume_type=vol_type_empty_qos,
volume_metadata=None)
self.assertEqual(fake_opts, params)
volume_types.destroy(self.ctxt, type_id)
        # If the QoS is set via the qos association with the volume type,
        # the qos value should be set in the returned parameters.
vol_type_qos = self._create_volume_type_qos(False, fake_qos)
type_id = vol_type_qos['id']
# If type_id is not none and volume_type is none, it should work fine.
params = self.driver._get_vdisk_params(type_id, volume_type=None,
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If type_id is not none and volume_type is not none, it should
# work fine.
params = self.driver._get_vdisk_params(type_id,
volume_type=vol_type_qos,
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If type_id is none and volume_type is not none, it should work fine.
params = self.driver._get_vdisk_params(None,
volume_type=vol_type_qos,
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If both type_id and volume_type are none, no qos will be returned
# in the parameter.
params = self.driver._get_vdisk_params(None, volume_type=None,
volume_metadata=None)
self.assertEqual(None, params['qos'])
qos_spec = volume_types.get_volume_type_qos_specs(type_id)
volume_types.destroy(self.ctxt, type_id)
qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id'])
        # If the QoS is set via the extra specs in the volume type,
        # the qos value should be set in the returned parameters.
vol_type_qos = self._create_volume_type_qos(True, fake_qos)
type_id = vol_type_qos['id']
# If type_id is not none and volume_type is none, it should work fine.
params = self.driver._get_vdisk_params(type_id, volume_type=None,
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If type_id is not none and volume_type is not none,
# it should work fine.
params = self.driver._get_vdisk_params(type_id,
volume_type=vol_type_qos,
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If type_id is none and volume_type is not none,
# it should work fine.
params = self.driver._get_vdisk_params(None,
volume_type=vol_type_qos,
volume_metadata=None)
self.assertEqual(expected_qos, params['qos'])
# If both type_id and volume_type are none, no qos will be returned
# in the parameter.
params = self.driver._get_vdisk_params(None, volume_type=None,
volume_metadata=None)
self.assertEqual(None, params['qos'])
volume_types.destroy(self.ctxt, type_id)
        # If the QoS is set in the volume metadata,
        # the qos value should be set in the returned parameters.
metadata = [{'key': 'qos:IOThrottling', 'value': 4000}]
expected_qos_metadata = {'IOThrottling': 4000}
params = self.driver._get_vdisk_params(None, volume_type=None,
volume_metadata=metadata)
self.assertEqual(expected_qos_metadata, params['qos'])
# If the QoS is set both in the metadata and the volume type, the one
# in the volume type will take effect.
vol_type_qos = self._create_volume_type_qos(True, fake_qos)
type_id = vol_type_qos['id']
params = self.driver._get_vdisk_params(type_id, volume_type=None,
volume_metadata=metadata)
self.assertEqual(expected_qos, params['qos'])
volume_types.destroy(self.ctxt, type_id)
# If the QoS is set both via the qos association and the
# extra specs, the one from the qos association will take effect.
fake_qos_associate = {'qos:IOThrottling': 6000}
expected_qos_associate = {'IOThrottling': 6000}
vol_type_qos = self._create_volume_type_qos_both(fake_qos,
fake_qos_associate)
type_id = vol_type_qos['id']
params = self.driver._get_vdisk_params(type_id, volume_type=None,
volume_metadata=None)
self.assertEqual(expected_qos_associate, params['qos'])
qos_spec = volume_types.get_volume_type_qos_specs(type_id)
volume_types.destroy(self.ctxt, type_id)
qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id'])
@mock.patch.object(helpers.StorwizeHelpers, 'disable_vdisk_qos')
@mock.patch.object(helpers.StorwizeHelpers, 'update_vdisk_qos')
def test_storwize_svc_retype_no_copy(self, update_vdisk_qos,
disable_vdisk_qos):
self.driver.do_setup(None)
loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
ctxt = context.get_admin_context()
key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True}
key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = self._generate_vol_info(None, None)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.create_volume(volume)
self.driver.retype(ctxt, volume, new_type, diff, host)
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertEqual('on', attrs['easy_tier'], 'Volume retype failed')
self.assertEqual('5', attrs['warning'], 'Volume retype failed')
self.assertEqual('off', attrs['autoexpand'], 'Volume retype failed')
self.driver.delete_volume(volume)
fake_opts = self._get_default_opts()
fake_opts_old = self._get_default_opts()
fake_opts_old['qos'] = {'IOThrottling': 4000}
fake_opts_qos = self._get_default_opts()
fake_opts_qos['qos'] = {'IOThrottling': 5000}
self.driver.create_volume(volume)
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for both the source and target volumes,
# add_vdisk_qos and disable_vdisk_qos will not be called for
# retype.
get_vdisk_params.side_effect = [fake_opts, fake_opts]
self.driver.retype(ctxt, volume, new_type, diff, host)
self.assertFalse(update_vdisk_qos.called)
self.assertFalse(disable_vdisk_qos.called)
self.driver.delete_volume(volume)
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is specified for both source and target volumes,
# add_vdisk_qos will be called for retype, and disable_vdisk_qos
# will not be called.
get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos]
self.driver.retype(ctxt, volume, new_type, diff, host)
update_vdisk_qos.assert_called_with(volume['name'],
fake_opts_qos['qos'])
self.assertFalse(disable_vdisk_qos.called)
self.driver.delete_volume(volume)
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
            # If qos is empty for source and specified for target volume,
            # add_vdisk_qos will be called for retype, and disable_vdisk_qos
            # will not be called.
get_vdisk_params.side_effect = [fake_opts, fake_opts_qos]
self.driver.retype(ctxt, volume, new_type, diff, host)
update_vdisk_qos.assert_called_with(volume['name'],
fake_opts_qos['qos'])
self.assertFalse(disable_vdisk_qos.called)
self.driver.delete_volume(volume)
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for target volume and specified for source
# volume, add_vdisk_qos will not be called for retype, and
# disable_vdisk_qos will be called.
get_vdisk_params.side_effect = [fake_opts_qos, fake_opts]
self.driver.retype(ctxt, volume, new_type, diff, host)
self.assertFalse(update_vdisk_qos.called)
disable_vdisk_qos.assert_called_with(volume['name'],
fake_opts_qos['qos'])
self.driver.delete_volume(volume)
def test_storwize_svc_retype_only_change_iogrp(self):
self.driver.do_setup(None)
loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
ctxt = context.get_admin_context()
key_specs_old = {'iogrp': 0}
key_specs_new = {'iogrp': 1}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = self._generate_vol_info(None, None)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.create_volume(volume)
self.driver.retype(ctxt, volume, new_type, diff, host)
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertEqual('1', attrs['IO_group_id'], 'Volume retype '
'failed')
self.driver.delete_volume(volume)
@mock.patch.object(helpers.StorwizeHelpers, 'disable_vdisk_qos')
@mock.patch.object(helpers.StorwizeHelpers, 'update_vdisk_qos')
def test_storwize_svc_retype_need_copy(self, update_vdisk_qos,
disable_vdisk_qos):
self.driver.do_setup(None)
loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
ctxt = context.get_admin_context()
key_specs_old = {'compression': True, 'iogrp': 0}
key_specs_new = {'compression': False, 'iogrp': 1}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = self._generate_vol_info(None, None)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
self.driver.create_volume(volume)
self.driver.retype(ctxt, volume, new_type, diff, host)
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertEqual('no', attrs['compressed_copy'])
self.assertEqual('1', attrs['IO_group_id'], 'Volume retype '
'failed')
self.driver.delete_volume(volume)
fake_opts = self._get_default_opts()
fake_opts_old = self._get_default_opts()
fake_opts_old['qos'] = {'IOThrottling': 4000}
fake_opts_qos = self._get_default_opts()
fake_opts_qos['qos'] = {'IOThrottling': 5000}
self.driver.create_volume(volume)
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for both the source and target volumes,
# add_vdisk_qos and disable_vdisk_qos will not be called for
# retype.
get_vdisk_params.side_effect = [fake_opts, fake_opts]
self.driver.retype(ctxt, volume, new_type, diff, host)
self.assertFalse(update_vdisk_qos.called)
self.assertFalse(disable_vdisk_qos.called)
self.driver.delete_volume(volume)
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is specified for both source and target volumes,
# add_vdisk_qos will be called for retype, and disable_vdisk_qos
# will not be called.
get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos]
self.driver.retype(ctxt, volume, new_type, diff, host)
update_vdisk_qos.assert_called_with(volume['name'],
fake_opts_qos['qos'])
self.assertFalse(disable_vdisk_qos.called)
self.driver.delete_volume(volume)
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
            # If qos is empty for source and specified for target volume,
            # add_vdisk_qos will be called for retype, and disable_vdisk_qos
            # will not be called.
get_vdisk_params.side_effect = [fake_opts, fake_opts_qos]
self.driver.retype(ctxt, volume, new_type, diff, host)
update_vdisk_qos.assert_called_with(volume['name'],
fake_opts_qos['qos'])
self.assertFalse(disable_vdisk_qos.called)
self.driver.delete_volume(volume)
self.driver.create_volume(volume)
update_vdisk_qos.reset_mock()
with mock.patch.object(storwize_svc.StorwizeSVCDriver,
'_get_vdisk_params') as get_vdisk_params:
# If qos is empty for target volume and specified for source
# volume, add_vdisk_qos will not be called for retype, and
# disable_vdisk_qos will be called.
get_vdisk_params.side_effect = [fake_opts_qos, fake_opts]
self.driver.retype(ctxt, volume, new_type, diff, host)
self.assertFalse(update_vdisk_qos.called)
disable_vdisk_qos.assert_called_with(volume['name'],
fake_opts_qos['qos'])
self.driver.delete_volume(volume)
def test_set_storage_code_level_success(self):
res = self.driver._helpers.get_system_info()
if self.USESIM:
self.assertEqual((7, 2, 0, 0), res['code_level'],
'Get code level error')
def test_storwize_vdisk_copy_ops(self):
ctxt = testutils.get_test_admin_context()
volume = self._create_volume()
driver = self.driver
dest_pool = self.driver.configuration.storwize_svc_volpool_name
new_ops = driver._helpers.add_vdisk_copy(volume['name'], dest_pool,
None, self.driver._state,
self.driver.configuration)
self.driver._add_vdisk_copy_op(ctxt, volume, new_ops)
admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id'])
self.assertEqual(":".join(x for x in new_ops),
admin_metadata['vdiskcopyops'],
'Storwize driver add vdisk copy error.')
self.driver._check_volume_copy_ops()
self.driver._rm_vdisk_copy_op(ctxt, volume, new_ops[0], new_ops[1])
admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id'])
self.assertEqual(None, admin_metadata.get('vdiskcopyops', None),
'Storwize driver delete vdisk copy error')
self._delete_volume(volume)
def test_storwize_delete_with_vdisk_copy_ops(self):
volume = self._create_volume()
self.driver._vdiskcopyops = {volume['id']: [('0', '1')]}
with mock.patch.object(self.driver, '_vdiskcopyops_loop'):
self.assertIn(volume['id'], self.driver._vdiskcopyops)
self.driver.delete_volume(volume)
self.assertNotIn(volume['id'], self.driver._vdiskcopyops)
def test_storwize_get_host_with_fc_connection(self):
# Create a FC host
del self._connector['initiator']
helper = self.driver._helpers
host_name = helper.create_host(self._connector)
# Remove the first wwpn from connector, and then try get host
wwpns = self._connector['wwpns']
wwpns.remove(wwpns[0])
host_name = helper.get_host_from_connector(self._connector)
self.assertIsNotNone(host_name)
def test_storwize_initiator_multiple_preferred_nodes_matching(self):
# Generate us a test volume
volume = self._create_volume()
# Fibre Channel volume type
extra_spec = {'capabilities:storage_protocol': '<in> FC'}
vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
volume['volume_type_id'] = vol_type['id']
# Make sure that the volumes have been created
self._assert_vol_exists(volume['name'], True)
        # Set up one WWPN that won't match and one that will.
self.driver._state['storage_nodes']['1']['WWPN'] = ['123456789ABCDEF0',
'AABBCCDDEEFF0010']
wwpns = ['ff00000000000000', 'ff00000000000001']
connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
with mock.patch.object(helpers.StorwizeHelpers,
'get_conn_fc_wwpns') as get_mappings:
get_mappings.return_value = ['AABBCCDDEEFF0001',
'AABBCCDDEEFF0002',
'AABBCCDDEEFF0010',
'AABBCCDDEEFF0012']
# Initialize the connection
init_ret = self.driver.initialize_connection(volume, connector)
# Make sure we use the preferred WWPN.
self.assertEqual(init_ret['data']['target_wwn'],
'AABBCCDDEEFF0010')
def test_storwize_initiator_multiple_preferred_nodes_no_matching(self):
# Generate us a test volume
volume = self._create_volume()
# Fibre Channel volume type
extra_spec = {'capabilities:storage_protocol': '<in> FC'}
vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
volume['volume_type_id'] = vol_type['id']
# Make sure that the volumes have been created
self._assert_vol_exists(volume['name'], True)
        # Set up WWPNs that will not match what is available.
self.driver._state['storage_nodes']['1']['WWPN'] = ['123456789ABCDEF0',
'123456789ABCDEF1']
wwpns = ['ff00000000000000', 'ff00000000000001']
connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
with mock.patch.object(helpers.StorwizeHelpers,
'get_conn_fc_wwpns') as get_mappings:
get_mappings.return_value = ['AABBCCDDEEFF0001',
'AABBCCDDEEFF0002',
'AABBCCDDEEFF0010',
'AABBCCDDEEFF0012']
# Initialize the connection
init_ret = self.driver.initialize_connection(volume, connector)
# Make sure we use the first available WWPN.
self.assertEqual(init_ret['data']['target_wwn'],
'AABBCCDDEEFF0001')
def test_storwize_initiator_single_preferred_node_matching(self):
# Generate us a test volume
volume = self._create_volume()
# Fibre Channel volume type
extra_spec = {'capabilities:storage_protocol': '<in> FC'}
vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
volume['volume_type_id'] = vol_type['id']
# Make sure that the volumes have been created
self._assert_vol_exists(volume['name'], True)
        # Set up one WWPN.
self.driver._state['storage_nodes']['1']['WWPN'] = ['AABBCCDDEEFF0012']
wwpns = ['ff00000000000000', 'ff00000000000001']
connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
with mock.patch.object(helpers.StorwizeHelpers,
'get_conn_fc_wwpns') as get_mappings:
get_mappings.return_value = ['AABBCCDDEEFF0001',
'AABBCCDDEEFF0002',
'AABBCCDDEEFF0010',
'AABBCCDDEEFF0012']
# Initialize the connection
init_ret = self.driver.initialize_connection(volume, connector)
# Make sure we use the preferred WWPN.
self.assertEqual(init_ret['data']['target_wwn'],
'AABBCCDDEEFF0012')
def test_storwize_terminate_connection(self):
# create a FC volume
volume_fc = self._create_volume()
extra_spec = {'capabilities:storage_protocol': '<in> FC'}
vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec)
volume_fc['volume_type_id'] = vol_type_fc['id']
# create a iSCSI volume
volume_iSCSI = self._create_volume()
extra_spec = {'capabilities:storage_protocol': '<in> iSCSI'}
vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec)
volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id']
connector = {'host': 'storwize-svc-host',
'wwnns': ['20000090fa17311e', '20000090fa17311f'],
'wwpns': ['ff00000000000000', 'ff00000000000001'],
'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}
self.driver.initialize_connection(volume_fc, connector)
self.driver.initialize_connection(volume_iSCSI, connector)
self.driver.terminate_connection(volume_iSCSI, connector)
self.driver.terminate_connection(volume_fc, connector)
def test_storwize_initiator_target_map(self):
# Generate us a test volume
volume = self._create_volume()
        # Fibre Channel volume type
extra_spec = {'capabilities:storage_protocol': '<in> FC'}
vol_type = volume_types.create(self.ctxt, 'FC', extra_spec)
volume['volume_type_id'] = vol_type['id']
# Make sure that the volumes have been created
self._assert_vol_exists(volume['name'], True)
wwpns = ['ff00000000000000', 'ff00000000000001']
connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
        # Initialize the connection
init_ret = self.driver.initialize_connection(volume, connector)
# Check that the initiator_target_map is as expected
init_data = {'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map':
{'ff00000000000000': ['AABBCCDDEEFF0011'],
'ff00000000000001': ['AABBCCDDEEFF0011']},
'target_discovered': False,
'target_lun': 0,
'target_wwn': 'AABBCCDDEEFF0011',
'volume_id': volume['id']
}
}
self.assertEqual(init_data, init_ret)
# Terminate connection
term_ret = self.driver.terminate_connection(volume, connector)
# Check that the initiator_target_map is as expected
term_data = {'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map':
{'ff00000000000000': ['AABBCCDDEEFF0011'],
'ff00000000000001': ['AABBCCDDEEFF0011']}
}
}
self.assertEqual(term_data, term_ret)
def test_storwize_create_volume_with_replication_disable(self):
volume = self._generate_vol_info(None, None)
model_update = self.driver.create_volume(volume)
self.assertIsNone(model_update)
model_update = self.driver.get_replication_status(self.ctxt, volume)
self.assertIsNone(model_update)
def test_storwize_create_volume_with_strech_cluster_replication(self):
# Set replication flag, set pool openstack2 for secondary volume.
self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
        # Create a type for replication.
volume = self._generate_vol_info(None, None)
volume_type = self._create_replication_volume_type(True)
volume['volume_type_id'] = volume_type['id']
self.driver.do_setup(self.ctxt)
model_update = self.driver.create_volume(volume)
self.assertEqual('copying', model_update['replication_status'])
volume['replication_status'] = 'copying'
volume['replication_extended_status'] = None
model_update = self.driver.get_replication_status(self.ctxt, volume)
self.assertEqual('copying', model_update['replication_status'])
        # Check the volume copy created on pool openstack2.
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertIn('openstack2', attrs['mdisk_grp_name'])
primary_status = attrs['primary']
self.driver.promote_replica(self.ctxt, volume)
# After promote_replica, the primary copy should be switched.
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertEqual(primary_status[0], attrs['primary'][1])
self.assertEqual(primary_status[1], attrs['primary'][0])
self.driver.delete_volume(volume)
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertIsNone(attrs)
def test_storwize_create_cloned_volume_with_strech_cluster_replica(self):
# Set replication flag, set pool openstack2 for secondary volume.
self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
self.driver.do_setup(self.ctxt)
# Create a source volume.
src_volume = self._generate_vol_info(None, None)
self.driver.create_volume(src_volume)
# Create a type for replication.
volume = self._generate_vol_info(None, None)
volume_type = self._create_replication_volume_type(True)
volume['volume_type_id'] = volume_type['id']
# Create a cloned volume from source volume.
model_update = self.driver.create_cloned_volume(volume, src_volume)
self.assertEqual('copying', model_update['replication_status'])
# Check the replication volume created on pool openstack2.
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
self.assertIn('openstack2', attrs['mdisk_grp_name'])
def test_storwize_create_snapshot_volume_with_strech_cluster_replica(self):
# Set replication flag, set pool openstack2 for secondary volume.
self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
self.driver.do_setup(self.ctxt)
vol1 = self._create_volume()
snap = self._generate_vol_info(vol1['name'], vol1['id'])
self.driver.create_snapshot(snap)
# Create a type for replication.
vol2 = self._generate_vol_info(None, None)
volume_type = self._create_replication_volume_type(True)
vol2['volume_type_id'] = volume_type['id']
model_update = self.driver.create_volume_from_snapshot(vol2, snap)
self._assert_vol_exists(vol2['name'], True)
self.assertEqual('copying', model_update['replication_status'])
# Check the replication volume created on pool openstack2.
attrs = self.driver._helpers.get_vdisk_attributes(vol2['name'])
self.assertIn('openstack2', attrs['mdisk_grp_name'])
def test_storwize_retype_with_strech_cluster_replication(self):
self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
self.driver.do_setup(self.ctxt)
loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
ctxt = context.get_admin_context()
disable_type = self._create_replication_volume_type(False)
enable_type = self._create_replication_volume_type(True)
diff, _equal = volume_types.volume_types_diff(ctxt,
disable_type['id'],
enable_type['id'])
volume = self._generate_vol_info(None, None)
volume['host'] = host
volume['volume_type_id'] = disable_type['id']
volume['volume_type'] = disable_type
volume['replication_status'] = None
volume['replication_extended_status'] = None
# Create volume which is not volume replication
self.driver.create_volume(volume)
# The volume passed as this parameter should be a DB object.
model_update = self.driver.get_replication_status(self.ctxt, volume)
self.assertEqual('error', model_update['replication_status'])
# Enable replica
self.driver.retype(ctxt, volume, enable_type, diff, host)
model_update = self.driver.get_replication_status(self.ctxt, volume)
self.assertEqual('copying', model_update['replication_status'])
self.driver.delete_volume(volume)
def test_storwize_retype_from_none_to_strech_cluster_replication(self):
self._set_flag('storwize_svc_stretched_cluster_partner', 'openstack2')
self.driver.do_setup(self.ctxt)
loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
':openstack')
cap = {'location_info': loc, 'extent_size': '128'}
self.driver._stats = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
ctxt = context.get_admin_context()
volume = self._generate_vol_info(None, None)
volume['volume_type_id'] = None
volume['volume_type'] = None
volume['replication_status'] = "disabled"
volume['replication_extended_status'] = None
# Create volume which is not volume replication
model_update = self.driver.create_volume(volume)
self.assertIsNone(model_update)
# The volume passed as this parameter should be a DB object.
model_update = self.driver.get_replication_status(self.ctxt, volume)
self.assertIsNone(model_update)
enable_type = self._create_replication_volume_type(True)
diff, _equal = volume_types.volume_types_diff(ctxt,
None,
enable_type['id'])
# Enable replica
self.driver.retype(ctxt, volume, enable_type, diff, host)
# In DB replication_status will be updated
volume['replication_status'] = None
model_update = self.driver.get_replication_status(self.ctxt, volume)
self.assertEqual('copying', model_update['replication_status'])
self.driver.delete_volume(volume)
def test_storwize_initiator_target_map_npiv(self):
# Create two volumes to be used in mappings
ctxt = context.get_admin_context()
self._set_flag('storwize_svc_npiv_compatibility_mode', True)
# Generate us a test volume
volume = self._generate_vol_info(None, None)
self.driver.create_volume(volume)
# Fibre Channel volume type
vol_type = volume_types.create(ctxt, 'FC', {'protocol': 'FC'})
volume['volume_type_id'] = vol_type['id']
# Make sure that the volumes have been created
self._assert_vol_exists(volume['name'], True)
wwpns = ['ff00000000000000', 'ff00000000000001']
connector = {'host': 'storwize-svc-test', 'wwpns': wwpns}
# Initialise the connection
with mock.patch.object(helpers.StorwizeHelpers,
'get_conn_fc_wwpns') as conn_fc_wwpns:
conn_fc_wwpns.return_value = []
init_ret = self.driver.initialize_connection(volume, connector)
# Check that the initiator_target_map is as expected
init_data = {'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map':
{'ff00000000000000': ['500507680220C744',
'500507680210C744',
'500507680220C745',
'500507680230C745'],
'ff00000000000001': ['500507680220C744',
'500507680210C744',
'500507680220C745',
'500507680230C745']},
'target_discovered': False,
'target_lun': 0,
'target_wwn': '500507680220C744',
'volume_id': volume['id']
}
}
self.assertEqual(init_data, init_ret)
# Terminate connection
term_ret = self.driver.terminate_connection(volume, connector)
# Check that the initiator_target_map is as expected
term_data = {'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map':
{'ff00000000000000': ['AABBCCDDEEFF0011'],
'ff00000000000001': ['AABBCCDDEEFF0011']}
}
}
self.assertEqual(term_data, term_ret)
def test_storwize_consistency_group_snapshot(self):
cg_type = self._create_consistency_group_volume_type()
cg = self._create_consistencygroup_in_db(volume_type_id=cg_type['id'])
model_update = self.driver.create_consistencygroup(self.ctxt, cg)
self.assertEqual(model_update['status'],
'available',
"CG created failed")
# Add volumes to CG
self._create_volume(volume_type_id=cg_type['id'],
consistencygroup_id=cg['id'])
self._create_volume(volume_type_id=cg_type['id'],
consistencygroup_id=cg['id'])
self._create_volume(volume_type_id=cg_type['id'],
consistencygroup_id=cg['id'])
cg_snapshot = self._create_cgsnapshot_in_db(cg['id'])
model_update = self.driver.create_cgsnapshot(self.ctxt, cg_snapshot)
self.assertEqual('available',
model_update[0]['status'],
"CGSnapshot created failed")
for snapshot in model_update[1]:
self.assertEqual('available', snapshot['status'])
model_update = self.driver.delete_consistencygroup(self.ctxt, cg)
self.assertEqual('deleted', model_update[0]['status'])
for volume in model_update[1]:
self.assertEqual('deleted', volume['status'])
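# Helper factories used by the tests above to build volume types carrying
# QoS, replication and consistency-group extra specs.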
def _create_volume_type_qos(self, extra_specs, fake_qos):
# Generate a QoS volume type for volume.
if extra_specs:
spec = fake_qos
type_ref = volume_types.create(self.ctxt, "qos_extra_specs", spec)
else:
type_ref = volume_types.create(self.ctxt, "qos_associate", None)
if fake_qos:
qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos)
qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'],
type_ref['id'])
qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id'])
return qos_type
def _create_volume_type_qos_both(self, fake_qos, fake_qos_associate):
type_ref = volume_types.create(self.ctxt, "qos_extra_specs", fake_qos)
qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos_associate)
qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'],
type_ref['id'])
qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id'])
return qos_type
def _create_replication_volume_type(self, enable):
# Generate a volume type for volume replication.
if enable:
spec = {'capabilities:replication': '<is> True'}
type_ref = volume_types.create(self.ctxt, "replication_1", spec)
else:
spec = {'capabilities:replication': '<is> False'}
type_ref = volume_types.create(self.ctxt, "replication_2", spec)
replication_type = volume_types.get_volume_type(self.ctxt,
type_ref['id'])
return replication_type
def _create_consistency_group_volume_type(self):
# Generate a volume type for volume consistencygroup.
spec = {'capabilities:consistencygroup_support': '<is> True'}
type_ref = volume_types.create(self.ctxt, "cg", spec)
cg_type = volume_types.get_volume_type(self.ctxt, type_ref['id'])
return cg_type
def _get_vdisk_uid(self, vdisk_name):
"""Return vdisk_UID for given vdisk.
Given a vdisk by name, performs an lsvdisk command that extracts
the vdisk_UID parameter and returns it.
Returns None if the specified vdisk does not exist.
"""
vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name,
delim='!')
# Iterate through each row until we find the vdisk_UID entry
for row in vdisk_properties.split('\n'):
words = row.split('!')
if words[0] == 'vdisk_UID':
return words[1]
return None
def _create_volume_and_return_uid(self, volume_name):
"""Creates a volume and returns its UID.
Creates a volume with the specified name, and returns the UID that
the Storwize controller allocated for it. We do this by executing a
create_volume and then calling into the simulator to perform an
lsvdisk directly.
"""
volume = self._generate_vol_info(None, None)
self.driver.create_volume(volume)
return (volume, self._get_vdisk_uid(volume['name']))
def test_manage_existing_bad_ref(self):
"""Error on manage with bad reference.
This test case attempts to manage an existing volume but passes in
a bad reference that the Storwize driver doesn't understand. We
expect an exception to be raised.
"""
volume = self._generate_vol_info(None, None)
ref = {}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, volume, ref)
def test_manage_existing_bad_uid(self):
"""Error when the specified UUID does not exist."""
volume = self._generate_vol_info(None, None)
ref = {'source-id': 'bad_uid'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, volume, ref)
def test_manage_existing_good_uid_not_mapped(self):
"""Tests managing a volume with no mappings.
This test case attempts to manage an existing volume by UID, and
we expect it to succeed. We verify that the backend volume was
renamed to have the name of the Cinder volume that we asked for it to
be associated with.
"""
# Create a volume as a way of getting a vdisk created, and find out the
# UID of that vdisk.
_volume, uid = self._create_volume_and_return_uid('manage_test')
# Descriptor of the Cinder volume that we want to own the vdisk
# referenced by uid.
new_volume = self._generate_vol_info(None, None)
# Submit the request to manage it.
ref = {'source-id': uid}
size = self.driver.manage_existing_get_size(new_volume, ref)
self.assertEqual(size, 10)
self.driver.manage_existing(new_volume, ref)
# Assert that there is a disk named after the new volume that has the
# ID that we passed in, indicating that the disk has been renamed.
uid_of_new_volume = self._get_vdisk_uid(new_volume['name'])
self.assertEqual(uid, uid_of_new_volume)
def test_manage_existing_good_uid_mapped(self):
"""Tests managing a mapped volume with no override.
This test case attempts to manage an existing volume by UID, but
the volume is mapped to a host, so we expect to see an exception
raised.
"""
# Create a volume as a way of getting a vdisk created, and find out the
# UID of that vdisk.
volume, uid = self._create_volume_and_return_uid('manage_test')
# Map a host to the disk
conn = {'initiator': u'unicode:initiator3',
'ip': '10.10.10.12',
'host': u'unicode.foo}.bar}.baz'}
self.driver.initialize_connection(volume, conn)
# Descriptor of the Cinder volume that we want to own the vdisk
# referenced by uid.
volume = self._generate_vol_info(None, None)
ref = {'source-id': uid}
# Attempt to manage this disk, and expect an exception because the
# volume is already mapped.
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, volume, ref)
def test_manage_existing_good_uid_mapped_with_override(self):
"""Tests managing a mapped volume with override.
This test case attempts to manage an existing volume by UID, when it
already mapped to a host, but the ref specifies that this is OK.
We verify that the backend volume was renamed to have the name of the
Cinder volume that we asked for it to be associated with.
"""
# Create a volume as a way of getting a vdisk created, and find out the
# UID of that vdisk.
volume, uid = self._create_volume_and_return_uid('manage_test')
# Map a host to the disk
conn = {'initiator': u'unicode:initiator3',
'ip': '10.10.10.12',
'host': u'unicode.foo}.bar}.baz'}
self.driver.initialize_connection(volume, conn)
# Descriptor of the Cinder volume that we want to own the vdisk
# referenced by uid.
new_volume = self._generate_vol_info(None, None)
# Submit the request to manage it, specifying that it is OK to
# manage a volume that is already attached.
ref = {'source-id': uid, 'manage_if_in_use': True}
size = self.driver.manage_existing_get_size(new_volume, ref)
self.assertEqual(size, 10)
self.driver.manage_existing(new_volume, ref)
# Assert that there is a disk named after the new volume that has the
# ID that we passed in, indicating that the disk has been renamed.
uid_of_new_volume = self._get_vdisk_uid(new_volume['name'])
self.assertEqual(uid, uid_of_new_volume)
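# Tests for the CLIResponse parser, which turns the '!'-delimited output of
# Storwize CLI commands (as returned over SSH) into indexable rows.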
class CLIResponseTestCase(test.TestCase):
def test_empty(self):
self.assertEqual(0, len(ssh.CLIResponse('')))
self.assertEqual(0, len(ssh.CLIResponse(('', 'stderr'))))
def test_header(self):
raw = r'''id!name
1!node1
2!node2
'''
resp = ssh.CLIResponse(raw, with_header=True)
self.assertEqual(2, len(resp))
self.assertEqual('1', resp[0]['id'])
self.assertEqual('2', resp[1]['id'])
def test_select(self):
raw = r'''id!123
name!Bill
name!Bill2
age!30
home address!s1
home address!s2
id! 7
name!John
name!John2
age!40
home address!s3
home address!s4
'''
resp = ssh.CLIResponse(raw, with_header=False)
self.assertEqual(list(resp.select('home address', 'name',
'home address')),
[('s1', 'Bill', 's1'), ('s2', 'Bill2', 's2'),
('s3', 'John', 's3'), ('s4', 'John2', 's4')])
def test_lsnode_all(self):
raw = r'''id!name!UPS_serial_number!WWNN!status
1!node1!!500507680200C744!online
2!node2!!500507680200C745!online
'''
resp = ssh.CLIResponse(raw)
self.assertEqual(2, len(resp))
self.assertEqual('1', resp[0]['id'])
self.assertEqual('500507680200C744', resp[0]['WWNN'])
self.assertEqual('2', resp[1]['id'])
self.assertEqual('500507680200C745', resp[1]['WWNN'])
def test_lsnode_single(self):
raw = r'''id!1
port_id!500507680210C744
port_status!active
port_speed!8Gb
port_id!500507680240C744
port_status!inactive
port_speed!8Gb
'''
resp = ssh.CLIResponse(raw, with_header=False)
self.assertEqual(1, len(resp))
self.assertEqual('1', resp[0]['id'])
self.assertEqual(list(resp.select('port_id', 'port_status')),
[('500507680210C744', 'active'),
('500507680240C744', 'inactive')])
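# StorwizeHelpers tests; the SSH layer is mocked, so no real array is needed.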
class StorwizeHelpersTestCase(test.TestCase):
def setUp(self):
super(StorwizeHelpersTestCase, self).setUp()
self.helpers = helpers.StorwizeHelpers(None)
def test_compression_enabled(self):
fake_license_without_keys = {}
fake_license = {
'license_compression_enclosures': '1',
'license_compression_capacity': '1'
}
# Check when keys of return licenses do not contain
# 'license_compression_enclosures' and 'license_compression_capacity'
with mock.patch.object(ssh.StorwizeSSH, 'lslicense') as lslicense:
lslicense.return_value = fake_license_without_keys
self.assertFalse(self.helpers.compression_enabled())
with mock.patch.object(ssh.StorwizeSSH, 'lslicense') as lslicense:
lslicense.return_value = fake_license
self.assertTrue(self.helpers.compression_enabled())
| apache-2.0 | 1,457,417,452,940,624,400 | 42.319785 | 79 | 0.53211 | false |
sproberts92/interleave-pdf | interleave-pdf.py | 1 | 1996 | import PyPDF2
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.filedialog as fd
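# Minimal Tkinter GUI that writes a copy of a chosen PDF with a blank page
# inserted after every page (e.g. to leave room for notes when printed).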
class Application(tk.Frame):
def __init__(self, master=None):
root = tk.Tk()
root.iconbitmap('icon\\interleave-pdf.ico')
self.input_path = tk.StringVar()
self.output_path = tk.StringVar()
tk.Frame.__init__(self, root)
self.master.resizable(False, False)
self.master.title('Interleave PDF')
self.grid()
self.label = ttk.Label(self, text="Input", width=12, anchor=tk.CENTER)
self.label.grid(row=0, column=0)
self.entry_in = ttk.Entry(self, width=50, textvariable=self.input_path)
self.entry_in.grid(row=0, column=1)
self.button = ttk.Button(self, text="Browse", command=self.load_file, width=12)
self.button.grid(row=0, column=2)
self.label = ttk.Label(self, text="Output", width=12, anchor=tk.CENTER)
self.label.grid(row=1, column=0)
self.entry_out = ttk.Entry(self, width=50, textvariable=self.output_path)
self.entry_out.grid(row=1, column=1)
self.button = ttk.Button(self, text="Browse", command=self.save_file, width=12)
self.button.grid(row=1, column=2)
self.button = ttk.Button(self, text="Interleave", command=self.interleave, width=12)
self.button.grid(row=2, column=2)
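# File-dialog callbacks: the chosen paths are pushed into the entry widgets
# through their StringVars.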
def load_file(self):
path = fd.askopenfilename(filetypes=(("Adobe PDF Files", "*.pdf"), ("All files", "*.*")))
self.input_path.set(path)
def save_file(self):
path = fd.asksaveasfilename(filetypes=(("Adobe PDF Files", "*.pdf"), ("All files", "*.*")))
self.output_path.set(path)
def interleave(self):
input_path = self.entry_in.get()
output_path = self.entry_out.get()
if input_path and output_path:
document = PyPDF2.PdfFileReader(input_path)
writer = PyPDF2.PdfFileWriter()
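# Copy each source page, following it with a blank page; with no arguments,
# addBlankPage() reuses the size of the last page added.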
for page in document.pages:
writer.addPage(page)
writer.addBlankPage()
outputStream = open(output_path, 'wb')
writer.write(outputStream)
outputStream.close()
if __name__ == "__main__":
Application().mainloop()
| mit | -7,549,118,881,770,922,000 | 29.242424 | 93 | 0.692385 | false |