Dataset columns (one record per source file):
- repo_name: string, length 5 to 100
- path: string, length 4 to 299
- copies: string, 990 distinct values
- size: string, length 4 to 7
- content: string, length 666 to 1.03M
- license: string, 15 distinct values
- hash: int64, range -9,223,351,895,964,839,000 to 9,223,297,778B
- line_mean: float64, range 3.17 to 100
- line_max: int64, range 7 to 1k
- alpha_frac: float64, range 0.25 to 0.98
- autogenerated: bool, 1 class
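The records below carry exactly these fields. As a minimal sketch of how such records might be filtered, assuming each one is a plain Python dict keyed by the column names above (the sample values, the keep() helper, and its thresholds are illustrative, not part of the dataset):

# Hypothetical records mirroring the columns listed above; values are illustrative.
rows = [
    {"repo_name": "dejlek/pulsar", "path": "tests/stores/redis.py",
     "copies": "1", "size": "2100", "content": "import asyncio ...",
     "license": "bsd-3-clause", "hash": -2365873232518695400,
     "line_mean": 32.33, "line_max": 75, "alpha_frac": 0.64,
     "autogenerated": False},
]

def keep(row, licenses=("bsd-3-clause", "apache-2.0"), max_line=120):
    # Keep non-autogenerated files under an allowed license with short lines.
    return (not row["autogenerated"]
            and row["license"] in licenses
            and row["line_max"] <= max_line)

filtered = [r for r in rows if keep(r)]
print(len(filtered), "record(s) kept")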

repo_name: dejlek/pulsar | path: tests/stores/redis.py | copies: 1 | size: 2100
content:
import asyncio

from pulsar import HAS_C_EXTENSIONS
from pulsar.apps.test import check_server
from pulsar.apps.data import RedisScript

from .pulsards import unittest, RedisCommands, create_store
from .lock import RedisLockTests


OK = check_server('redis')


@unittest.skipUnless(OK, 'Requires a running Redis server')
class RedisDbTest(RedisCommands, RedisLockTests):
    pass


@unittest.skipUnless(OK, 'Requires a running redis server')
class TestRedisStore(RedisDbTest, unittest.TestCase):
    store = None

    @classmethod
    def setUpClass(cls):
        addr = cls.cfg.redis_server
        if not addr.startswith('redis://'):
            addr = 'redis://%s' % cls.cfg.redis_server
        namespace = cls.__name__.lower()
        cls.store = create_store(addr, pool_size=3, namespace=namespace)
        cls.client = cls.store.client()

    @asyncio.coroutine
    def test_script(self):
        script = RedisScript("return 1")
        self.assertFalse(script.sha)
        self.assertTrue(script.script)
        result = yield from script(self.client)
        self.assertEqual(result, 1)
        self.assertTrue(script.sha)
        self.assertTrue(script.sha in self.client.store.loaded_scripts)
        result = yield from script(self.client)
        self.assertEqual(result, 1)

    @asyncio.coroutine
    def test_eval(self):
        result = yield from self.client.eval('return "Hello"')
        self.assertEqual(result, b'Hello')
        result = yield from self.client.eval("return {ok='OK'}")
        self.assertEqual(result, b'OK')

    @asyncio.coroutine
    def test_eval_with_keys(self):
        result = yield from self.client.eval("return {KEYS, ARGV}",
                                             ('a', 'b'),
                                             ('first', 'second', 'third'))
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0], [b'a', b'b'])
        self.assertEqual(result[1], [b'first', b'second', b'third'])


@unittest.skipUnless(OK and HAS_C_EXTENSIONS, 'Requires cython extensions')
class TestRedisStorePyParser(TestRedisStore):
    pass
license: bsd-3-clause | hash: -2,365,873,232,518,695,400 | line_mean: 32.333333 | line_max: 75 | alpha_frac: 0.640952 | autogenerated: false

repo_name: jblupus/PyLoyaltyProject | path: old/interactions/format_interactions.py | copies: 1 | size: 5936
content:
from os import mkdir from os.path import exists import numpy as np import pandas as pd from old.project import CassandraUtils from old.project import get_time RTD_STS_KEY = 'retweetedStatus' MT_STS_KEY = 'userMentionEntities' PATH = '/home/joao/Dev/Data/Twitter/' FRIENDS_PATH = '/home/joao/Dev/Data/Twitter/friendships/' def form_tweet(tweet, date=None): return {'id': tweet['id'], 'lang': tweet['lang'] if 'lang' in tweet else None, 'text': tweet['text'], 'date': date or tweet['createdAt'], 'user': {'id': tweet['user']['id']}} def folding(tweets): last_index = np.ceil(len(tweets) * 0.75).astype(int) if last_index < 10: return np.array(tweets) np_array = np.array(tweets) return np_array[0:last_index] def append_frame(df, df_plus): return df.append(df_plus) def save_frames(file_name, _data): if len(_data) > 0: data_frame = mount_frame(data=_data) df = pd.DataFrame() df = append_frame(df, data_frame) try: df.to_csv(file_name, sep=',', encoding='utf-8') except IOError: df.to_csv('Saida/retweets/' + file_name.split('/')[len(file_name.split('/')) - 1]) def mount_frame(data): data = np.array(data) data_frame = pd.DataFrame() if np.size(data) > 0: data_frame['alter'] = data[:, 1] data_frame['tweet'] = data[:, 0] return data_frame class FriendsDataToDataFrame: # frame_path = '/home/joao/Dev/Data/Twitter/friends.data/data.frame/' def __init__(self): self.cass = CassandraUtils() self.frame_path = '/home/joao/Dev/Shared/Saida/' self.likes_path = self.frame_path + 'likes/' self.mentions_path = self.frame_path + 'mentions/' self.retweets_path = self.frame_path + 'retweets/' print self.retweets_path try: if not exists(self.frame_path): mkdir(self.frame_path) if not exists(self.likes_path): mkdir(self.likes_path) if not exists(self.mentions_path): mkdir(self.mentions_path) if not exists(self.retweets_path): mkdir(self.retweets_path) except Exception as e: raise e def get_seeds(self): # seeds = pd.DataFrame() seeds = self.cass.find_seeds(); # print seeds seeds = map(lambda x: x.user_id, seeds) return seeds # # def check_friends_data(self, user_id): # friends = self(user_id=user_id) # data = filter(lambda friend_id: not exists(path=self.frame_path + str(friend_id) + '.csv'), friends['id']) # return len(data) == 0 # def check_json_interactions(self, seeds=None): # print 'Starting...', get_time(True) # # seeds = load_seeds() if seeds is None else seeds # seeds = self.get_seeds() # for user_id in seeds: # if not self.check_friends_data(user_id=user_id): # return user_id # print user_id # print 'Stopping...', get_time(True) # return None def json_interactions(self, _type=None, _force=False, _clean=False, check=False, init_id=0, unique=False): print 'Starting...', get_time(True) if not unique: seeds = self.get_seeds() # if _clean: # clean_data(self.frame_path) # elif check: # user_id = self.check_json_interactions(seeds=seeds) # if user_id is not None: # self.friends_data(user_id=user_id, _type=_type, force=True) # return Nonea seeds = filter(lambda s: s > init_id, seeds) for user_id in seeds: self.friends_data(user_id=user_id, _type=_type, force=_force) else: self.friends_data(user_id=init_id, _type=_type, force=_force) print 'Stopping...', get_time(True) def friends_data(self, user_id, _type, force=False): friends = map(lambda x: x.friend_id, self.cass.find_friends(user_id=user_id)) for friend_id in friends: if _type is None: pass elif _type == 1: path = self.likes_path + str(friend_id) + '.csv' if not exists(path=path) or force: self.save_likes(friend_id=friend_id) elif _type == 2: path = self.mentions_path + 
str(friend_id) + '.csv' if not exists(path=path) or force: self.save_mentions(friend_id=friend_id) elif _type == 3: path = self.retweets_path + str(friend_id) + '.csv' if not exists(path=path) or force: self.save_retweets(friend_id=friend_id) else: pass print user_id, friend_id, get_time() def save_likes(self, friend_id): likes = self.cass.find_likes(user_id=friend_id) likes_data = map(lambda tt: [tt['id'], tt['user']['id']], likes) save_frames(self.likes_path + str(friend_id) + '.csv', likes_data) def save_retweets(self, friend_id, _tweets=None): tweets = _tweets or self.cass.find_tweets(user_id=friend_id) retweets = self.cass.find_retweets(tweets=tweets) retweets_data = map(lambda tt: [tt[RTD_STS_KEY]['id'], tt[RTD_STS_KEY]['user']['id']], retweets) save_frames(self.retweets_path + str(friend_id) + '.csv', retweets_data) def save_mentions(self, friend_id, _tweets=None): tweets = _tweets or self.cass.find_tweets(user_id=friend_id) mentions = self.cass.find_mentions(tweets=tweets) mentions_data = [] for tweet in mentions: mentions_data.extend(map(lambda mention: [tweet['id'], mention['id']], tweet[MT_STS_KEY])) save_frames(self.mentions_path + str(friend_id) + '.csv', mentions_data)
license: bsd-2-clause | hash: 5,772,484,004,934,030,000 | line_mean: 35.417178 | line_max: 116 | alpha_frac: 0.566038 | autogenerated: false

repo_name: pwhelan/djshouts | path: django/contrib/sites/models.py | copies: 387 | size: 2867
content:
from django.db import models
from django.utils.translation import ugettext_lazy as _


SITE_CACHE = {}


class SiteManager(models.Manager):

    def get_current(self):
        """
        Returns the current ``Site`` based on the SITE_ID in the
        project's settings. The ``Site`` object is cached the first
        time it's retrieved from the database.
        """
        from django.conf import settings
        try:
            sid = settings.SITE_ID
        except AttributeError:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured("You're using the Django \"sites framework\" without having set the SITE_ID setting. Create a site in your database and set the SITE_ID setting to fix this error.")
        try:
            current_site = SITE_CACHE[sid]
        except KeyError:
            current_site = self.get(pk=sid)
            SITE_CACHE[sid] = current_site
        return current_site

    def clear_cache(self):
        """Clears the ``Site`` object cache."""
        global SITE_CACHE
        SITE_CACHE = {}


class Site(models.Model):
    domain = models.CharField(_('domain name'), max_length=100)
    name = models.CharField(_('display name'), max_length=50)
    objects = SiteManager()

    class Meta:
        db_table = 'django_site'
        verbose_name = _('site')
        verbose_name_plural = _('sites')
        ordering = ('domain',)

    def __unicode__(self):
        return self.domain

    def save(self, *args, **kwargs):
        super(Site, self).save(*args, **kwargs)
        # Cached information will likely be incorrect now.
        if self.id in SITE_CACHE:
            del SITE_CACHE[self.id]

    def delete(self):
        pk = self.pk
        super(Site, self).delete()
        try:
            del SITE_CACHE[pk]
        except KeyError:
            pass


class RequestSite(object):
    """
    A class that shares the primary interface of Site (i.e., it has
    ``domain`` and ``name`` attributes) but gets its data from a Django
    HttpRequest object rather than from a database.

    The save() and delete() methods raise NotImplementedError.
    """
    def __init__(self, request):
        self.domain = self.name = request.get_host()

    def __unicode__(self):
        return self.domain

    def save(self, force_insert=False, force_update=False):
        raise NotImplementedError('RequestSite cannot be saved.')

    def delete(self):
        raise NotImplementedError('RequestSite cannot be deleted.')


def get_current_site(request):
    """
    Checks if contrib.sites is installed and returns either the current
    ``Site`` object or a ``RequestSite`` object based on the request.
    """
    if Site._meta.installed:
        current_site = Site.objects.get_current()
    else:
        current_site = RequestSite(request)
    return current_site
license: bsd-3-clause | hash: -1,679,323,123,397,846,800 | line_mean: 29.178947 | line_max: 203 | alpha_frac: 0.62016 | autogenerated: false

repo_name: jianghuaw/nova | path: nova/tests/unit/conductor/tasks/test_base.py | copies: 54 | size: 1604
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from nova.conductor.tasks import base
from nova import test


class FakeTask(base.TaskBase):

    def __init__(self, context, instance, fail=False):
        super(FakeTask, self).__init__(context, instance)
        self.fail = fail

    def _execute(self):
        if self.fail:
            raise Exception
        else:
            pass


class TaskBaseTestCase(test.NoDBTestCase):

    def setUp(self):
        super(TaskBaseTestCase, self).setUp()
        self.task = FakeTask(mock.MagicMock(), mock.MagicMock())

    @mock.patch.object(FakeTask, 'rollback')
    def test_wrapper_exception(self, fake_rollback):
        self.task.fail = True
        try:
            self.task.execute()
        except Exception:
            pass
        fake_rollback.assert_called_once_with()

    @mock.patch.object(FakeTask, 'rollback')
    def test_wrapper_no_exception(self, fake_rollback):
        try:
            self.task.execute()
        except Exception:
            pass
        self.assertFalse(fake_rollback.called)
license: apache-2.0 | hash: 8,221,673,198,678,516,000 | line_mean: 29.264151 | line_max: 78 | alpha_frac: 0.653367 | autogenerated: false

repo_name: Yukarumya/Yukarum-Redfoxes | path: testing/marionette/harness/marionette_harness/tests/unit/test_rendered_element.py | copies: 1 | size: 1336
content:
#Copyright 2007-2009 WebDriver committers
#Copyright 2007-2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

from marionette_driver.by import By

from marionette_harness import MarionetteTestCase


class RenderedElementTests(MarionetteTestCase):

    def testWeCanGetComputedStyleValueOnElement(self):
        test_url = self.marionette.absolute_url('javascriptPage.html')
        self.marionette.navigate(test_url)
        element = self.marionette.find_element(By.ID, "green-parent")
        backgroundColour = element.value_of_css_property("background-color")
        self.assertEqual("rgb(0, 128, 0)", backgroundColour)
        element = self.marionette.find_element(By.ID, "red-item")
        backgroundColour = element.value_of_css_property("background-color")
        self.assertEqual("rgb(255, 0, 0)", backgroundColour)
license: mpl-2.0 | hash: 7,430,800,408,507,566,000 | line_mean: 38.294118 | line_max: 76 | alpha_frac: 0.749251 | autogenerated: false

repo_name: sandeepdsouza93/TensorFlow-15712 | path: tensorflow/tools/dist_test/python/census_widendeep.py | copies: 3 | size: 11352
content:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Distributed training and evaluation of a wide and deep model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os from six.moves import urllib import tensorflow as tf from tensorflow.contrib.learn.python.learn import learn_runner from tensorflow.contrib.learn.python.learn.estimators import run_config # Define command-line flags flags = tf.app.flags flags.DEFINE_string("data_dir", "/tmp/census-data", "Directory for storing the cesnsus data") flags.DEFINE_string("model_dir", "/tmp/census_wide_and_deep_model", "Directory for storing the model") flags.DEFINE_string("output_dir", "", "Base output directory.") flags.DEFINE_string("schedule", "local_run", "Schedule to run for this experiment.") flags.DEFINE_string("master_grpc_url", "", "URL to master GRPC tensorflow server, e.g.," "grpc://127.0.0.1:2222") flags.DEFINE_integer("num_parameter_servers", 0, "Number of parameter servers") flags.DEFINE_integer("worker_index", 0, "Worker index (>=0)") flags.DEFINE_integer("train_steps", 1000, "Number of training steps") flags.DEFINE_integer("eval_steps", 1, "Number of evaluation steps") FLAGS = flags.FLAGS # Constants: Data download URLs TRAIN_DATA_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data" TEST_DATA_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test" # Define features for the model def census_model_config(): """Configuration for the census Wide & Deep model. Returns: columns: Column names to retrieve from the data source label_column: Name of the label column wide_columns: List of wide columns deep_columns: List of deep columns categorical_column_names: Names of the categorical columns continuous_column_names: Names of the continuous columns """ # 1. Categorical base columns. gender = tf.contrib.layers.sparse_column_with_keys( column_name="gender", keys=["female", "male"]) race = tf.contrib.layers.sparse_column_with_keys( column_name="race", keys=["Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black", "Other", "White"]) education = tf.contrib.layers.sparse_column_with_hash_bucket( "education", hash_bucket_size=1000) marital_status = tf.contrib.layers.sparse_column_with_hash_bucket( "marital_status", hash_bucket_size=100) relationship = tf.contrib.layers.sparse_column_with_hash_bucket( "relationship", hash_bucket_size=100) workclass = tf.contrib.layers.sparse_column_with_hash_bucket( "workclass", hash_bucket_size=100) occupation = tf.contrib.layers.sparse_column_with_hash_bucket( "occupation", hash_bucket_size=1000) native_country = tf.contrib.layers.sparse_column_with_hash_bucket( "native_country", hash_bucket_size=1000) # 2. Continuous base columns. 
age = tf.contrib.layers.real_valued_column("age") age_buckets = tf.contrib.layers.bucketized_column( age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) education_num = tf.contrib.layers.real_valued_column("education_num") capital_gain = tf.contrib.layers.real_valued_column("capital_gain") capital_loss = tf.contrib.layers.real_valued_column("capital_loss") hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week") wide_columns = [ gender, native_country, education, occupation, workclass, marital_status, relationship, age_buckets, tf.contrib.layers.crossed_column([education, occupation], hash_bucket_size=int(1e4)), tf.contrib.layers.crossed_column([native_country, occupation], hash_bucket_size=int(1e4)), tf.contrib.layers.crossed_column([age_buckets, race, occupation], hash_bucket_size=int(1e6))] deep_columns = [ tf.contrib.layers.embedding_column(workclass, dimension=8), tf.contrib.layers.embedding_column(education, dimension=8), tf.contrib.layers.embedding_column(marital_status, dimension=8), tf.contrib.layers.embedding_column(gender, dimension=8), tf.contrib.layers.embedding_column(relationship, dimension=8), tf.contrib.layers.embedding_column(race, dimension=8), tf.contrib.layers.embedding_column(native_country, dimension=8), tf.contrib.layers.embedding_column(occupation, dimension=8), age, education_num, capital_gain, capital_loss, hours_per_week] # Define the column names for the data sets. columns = ["age", "workclass", "fnlwgt", "education", "education_num", "marital_status", "occupation", "relationship", "race", "gender", "capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket"] label_column = "label" categorical_columns = ["workclass", "education", "marital_status", "occupation", "relationship", "race", "gender", "native_country"] continuous_columns = ["age", "education_num", "capital_gain", "capital_loss", "hours_per_week"] return (columns, label_column, wide_columns, deep_columns, categorical_columns, continuous_columns) class CensusDataSource(object): """Source of census data.""" def __init__(self, data_dir, train_data_url, test_data_url, columns, label_column, categorical_columns, continuous_columns): """Constructor of CensusDataSource. Args: data_dir: Directory to save/load the data files train_data_url: URL from which the training data can be downloaded test_data_url: URL from which the test data can be downloaded columns: Columns to retrieve from the data files (A list of strings) label_column: Name of the label column categorical_columns: Names of the categorical columns (A list of strings) continuous_columns: Names of the continuous columsn (A list of strings) """ # Retrieve data from disk (if available) or download from the web. train_file_path = os.path.join(data_dir, "adult.data") if os.path.isfile(train_file_path): print("Loading training data from file: %s" % train_file_path) train_file = open(train_file_path) else: urllib.urlretrieve(train_data_url, train_file_path) test_file_path = os.path.join(data_dir, "adult.test") if os.path.isfile(test_file_path): print("Loading test data from file: %s" % test_file_path) test_file = open(test_file_path) else: test_file = open(test_file_path) urllib.urlretrieve(test_data_url, test_file_path) # Read the training and testing data sets into Pandas DataFrame. 
import pandas # pylint: disable=g-import-not-at-top self._df_train = pandas.read_csv(train_file, names=columns, skipinitialspace=True) self._df_test = pandas.read_csv(test_file, names=columns, skipinitialspace=True, skiprows=1) # Remove the NaN values in the last rows of the tables self._df_train = self._df_train[:-1] self._df_test = self._df_test[:-1] # Apply the threshold to get the labels. income_thresh = lambda x: ">50K" in x self._df_train[label_column] = ( self._df_train["income_bracket"].apply(income_thresh)).astype(int) self._df_test[label_column] = ( self._df_test["income_bracket"].apply(income_thresh)).astype(int) self.label_column = label_column self.categorical_columns = categorical_columns self.continuous_columns = continuous_columns def input_train_fn(self): return self._input_fn(self._df_train) def input_test_fn(self): return self._input_fn(self._df_test) # TODO(cais): Turn into minibatch feeder def _input_fn(self, df): """Input data function. Creates a dictionary mapping from each continuous feature column name (k) to the values of that column stored in a constant Tensor. Args: df: data feed Returns: feature columns and labels """ continuous_cols = {k: tf.constant(df[k].values) for k in self.continuous_columns} # Creates a dictionary mapping from each categorical feature column name (k) # to the values of that column stored in a tf.SparseTensor. categorical_cols = {k: tf.SparseTensor( indices=[[i, 0] for i in range(df[k].size)], values=df[k].values, shape=[df[k].size, 1]) for k in self.categorical_columns} # Merges the two dictionaries into one. feature_cols = dict(continuous_cols.items() + categorical_cols.items()) # Converts the label column into a constant Tensor. label = tf.constant(df[self.label_column].values) # Returns the feature columns and the label. return feature_cols, label def _create_experiment_fn(output_dir): # pylint: disable=unused-argument """Experiment creation function.""" (columns, label_column, wide_columns, deep_columns, categorical_columns, continuous_columns) = census_model_config() census_data_source = CensusDataSource(FLAGS.data_dir, TRAIN_DATA_URL, TEST_DATA_URL, columns, label_column, categorical_columns, continuous_columns) os.environ["TF_CONFIG"] = json.dumps({ "cluster": { tf.contrib.learn.TaskType.PS: ["fake_ps"] * FLAGS.num_parameter_servers }, "task": { "index": FLAGS.worker_index } }) config = run_config.RunConfig(master=FLAGS.master_grpc_url) estimator = tf.contrib.learn.DNNLinearCombinedClassifier( model_dir=FLAGS.model_dir, linear_feature_columns=wide_columns, dnn_feature_columns=deep_columns, dnn_hidden_units=[5], config=config) return tf.contrib.learn.Experiment( estimator=estimator, train_input_fn=census_data_source.input_train_fn, eval_input_fn=census_data_source.input_test_fn, train_steps=FLAGS.train_steps, eval_steps=FLAGS.eval_steps ) def main(unused_argv): print("Worker index: %d" % FLAGS.worker_index) learn_runner.run(experiment_fn=_create_experiment_fn, output_dir=FLAGS.output_dir, schedule=FLAGS.schedule) if __name__ == "__main__": tf.app.run()
license: apache-2.0 | hash: 8,363,691,429,774,034,000 | line_mean: 39.688172 | line_max: 93 | alpha_frac: 0.652572 | autogenerated: false

repo_name: yangxiaoze/Unblock-Youku | path: test/run-all-tests.py | copies: 43 | size: 3290
content:
#!/usr/bin/env python """ Allow you smoothly surf on many websites blocking non-mainland visitors. Copyright (C) 2012, 2013 Bo Zhu http://zhuzhu.org This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import subprocess import time import sys import os # http://goo.gl/2wtRL # os.chdir(os.path.dirname(sys.argv[0])) if os.path.dirname(sys.argv[0]) != '': os.chdir(os.path.dirname(sys.argv[0])) print 'PhantomJS', try: version = subprocess.check_output(['phantomjs', '--version']) print version sys.stdout.flush() except Exception as exp: print 'is not installed.' print 'Please install it and try again.' sys.stdout.flush() sys.exit(-1) server_process = None def start_server(): global server_process print 'To start the server, and wait for 21 seconds to set up...' sys.stdout.flush() server_process = subprocess.Popen( ['node', '../server/server.js', '--production', '--port=8888']) time.sleep(21) def stop_server(): time.sleep(1) print 'To stop the server...', sys.stdout.flush() server_process.terminate() server_process.wait() print 'done.' sys.stdout.flush() # http://goo.gl/xaBer def red_alert(text): print "\033[7;31m" + text + "\033[0m" sys.stdout.flush() def run_all_tests(): print print 'To run all test-*.js files...' sys.stdout.flush() num_failed = 0 num_passed = 0 for file_name in os.listdir('.'): if file_name.startswith('test-') and file_name.endswith('.js'): if file_name.endswith('-proxy.js'): command = ['phantomjs', '--proxy=127.0.0.1:8888', file_name] else: command = ['phantomjs', file_name] print print ' '.join(command) sys.stdout.flush() return_value = subprocess.call(command) time.sleep(2) # sleep 2 seconds between tests if return_value != 0: num_failed += 1 red_alert(file_name + ' FAILED!') else: num_passed += 1 print file_name + ' passed.' sys.stdout.flush() print sys.stdout.flush() if num_failed > 0: red_alert('Final results: ' + str(num_failed) + ' TESTS FAILED' + ' (out of ' + str(num_failed + num_passed) + ')') else: print 'All %d tests passed.' % (num_passed + num_failed) print sys.stdout.flush() return num_failed if __name__ == '__main__': exit_code = -1 try: start_server() exit_code = run_all_tests() finally: stop_server() sys.exit(exit_code)
license: agpl-3.0 | hash: 8,629,015,176,296,238,000 | line_mean: 27.859649 | line_max: 76 | alpha_frac: 0.605167 | autogenerated: false

repo_name: AlexBoogaard/Sick-Beard-Torrent-Edition | path: lib/unidecode/x068.py | copies: 252 | size: 4674
content:
data = ( 'Zhi ', # 0x00 'Liu ', # 0x01 'Mei ', # 0x02 'Hoy ', # 0x03 'Rong ', # 0x04 'Zha ', # 0x05 '[?] ', # 0x06 'Biao ', # 0x07 'Zhan ', # 0x08 'Jie ', # 0x09 'Long ', # 0x0a 'Dong ', # 0x0b 'Lu ', # 0x0c 'Sayng ', # 0x0d 'Li ', # 0x0e 'Lan ', # 0x0f 'Yong ', # 0x10 'Shu ', # 0x11 'Xun ', # 0x12 'Shuan ', # 0x13 'Qi ', # 0x14 'Zhen ', # 0x15 'Qi ', # 0x16 'Li ', # 0x17 'Yi ', # 0x18 'Xiang ', # 0x19 'Zhen ', # 0x1a 'Li ', # 0x1b 'Su ', # 0x1c 'Gua ', # 0x1d 'Kan ', # 0x1e 'Bing ', # 0x1f 'Ren ', # 0x20 'Xiao ', # 0x21 'Bo ', # 0x22 'Ren ', # 0x23 'Bing ', # 0x24 'Zi ', # 0x25 'Chou ', # 0x26 'Yi ', # 0x27 'Jie ', # 0x28 'Xu ', # 0x29 'Zhu ', # 0x2a 'Jian ', # 0x2b 'Zui ', # 0x2c 'Er ', # 0x2d 'Er ', # 0x2e 'You ', # 0x2f 'Fa ', # 0x30 'Gong ', # 0x31 'Kao ', # 0x32 'Lao ', # 0x33 'Zhan ', # 0x34 'Li ', # 0x35 'Yin ', # 0x36 'Yang ', # 0x37 'He ', # 0x38 'Gen ', # 0x39 'Zhi ', # 0x3a 'Chi ', # 0x3b 'Ge ', # 0x3c 'Zai ', # 0x3d 'Luan ', # 0x3e 'Fu ', # 0x3f 'Jie ', # 0x40 'Hang ', # 0x41 'Gui ', # 0x42 'Tao ', # 0x43 'Guang ', # 0x44 'Wei ', # 0x45 'Kuang ', # 0x46 'Ru ', # 0x47 'An ', # 0x48 'An ', # 0x49 'Juan ', # 0x4a 'Yi ', # 0x4b 'Zhuo ', # 0x4c 'Ku ', # 0x4d 'Zhi ', # 0x4e 'Qiong ', # 0x4f 'Tong ', # 0x50 'Sang ', # 0x51 'Sang ', # 0x52 'Huan ', # 0x53 'Jie ', # 0x54 'Jiu ', # 0x55 'Xue ', # 0x56 'Duo ', # 0x57 'Zhui ', # 0x58 'Yu ', # 0x59 'Zan ', # 0x5a 'Kasei ', # 0x5b 'Ying ', # 0x5c 'Masu ', # 0x5d '[?] ', # 0x5e 'Zhan ', # 0x5f 'Ya ', # 0x60 'Nao ', # 0x61 'Zhen ', # 0x62 'Dang ', # 0x63 'Qi ', # 0x64 'Qiao ', # 0x65 'Hua ', # 0x66 'Kuai ', # 0x67 'Jiang ', # 0x68 'Zhuang ', # 0x69 'Xun ', # 0x6a 'Suo ', # 0x6b 'Sha ', # 0x6c 'Zhen ', # 0x6d 'Bei ', # 0x6e 'Ting ', # 0x6f 'Gua ', # 0x70 'Jing ', # 0x71 'Bo ', # 0x72 'Ben ', # 0x73 'Fu ', # 0x74 'Rui ', # 0x75 'Tong ', # 0x76 'Jue ', # 0x77 'Xi ', # 0x78 'Lang ', # 0x79 'Liu ', # 0x7a 'Feng ', # 0x7b 'Qi ', # 0x7c 'Wen ', # 0x7d 'Jun ', # 0x7e 'Gan ', # 0x7f 'Cu ', # 0x80 'Liang ', # 0x81 'Qiu ', # 0x82 'Ting ', # 0x83 'You ', # 0x84 'Mei ', # 0x85 'Bang ', # 0x86 'Long ', # 0x87 'Peng ', # 0x88 'Zhuang ', # 0x89 'Di ', # 0x8a 'Xuan ', # 0x8b 'Tu ', # 0x8c 'Zao ', # 0x8d 'Ao ', # 0x8e 'Gu ', # 0x8f 'Bi ', # 0x90 'Di ', # 0x91 'Han ', # 0x92 'Zi ', # 0x93 'Zhi ', # 0x94 'Ren ', # 0x95 'Bei ', # 0x96 'Geng ', # 0x97 'Jian ', # 0x98 'Huan ', # 0x99 'Wan ', # 0x9a 'Nuo ', # 0x9b 'Jia ', # 0x9c 'Tiao ', # 0x9d 'Ji ', # 0x9e 'Xiao ', # 0x9f 'Lu ', # 0xa0 'Huan ', # 0xa1 'Shao ', # 0xa2 'Cen ', # 0xa3 'Fen ', # 0xa4 'Song ', # 0xa5 'Meng ', # 0xa6 'Wu ', # 0xa7 'Li ', # 0xa8 'Li ', # 0xa9 'Dou ', # 0xaa 'Cen ', # 0xab 'Ying ', # 0xac 'Suo ', # 0xad 'Ju ', # 0xae 'Ti ', # 0xaf 'Jie ', # 0xb0 'Kun ', # 0xb1 'Zhuo ', # 0xb2 'Shu ', # 0xb3 'Chan ', # 0xb4 'Fan ', # 0xb5 'Wei ', # 0xb6 'Jing ', # 0xb7 'Li ', # 0xb8 'Bing ', # 0xb9 'Fumoto ', # 0xba 'Shikimi ', # 0xbb 'Tao ', # 0xbc 'Zhi ', # 0xbd 'Lai ', # 0xbe 'Lian ', # 0xbf 'Jian ', # 0xc0 'Zhuo ', # 0xc1 'Ling ', # 0xc2 'Li ', # 0xc3 'Qi ', # 0xc4 'Bing ', # 0xc5 'Zhun ', # 0xc6 'Cong ', # 0xc7 'Qian ', # 0xc8 'Mian ', # 0xc9 'Qi ', # 0xca 'Qi ', # 0xcb 'Cai ', # 0xcc 'Gun ', # 0xcd 'Chan ', # 0xce 'Te ', # 0xcf 'Fei ', # 0xd0 'Pai ', # 0xd1 'Bang ', # 0xd2 'Pou ', # 0xd3 'Hun ', # 0xd4 'Zong ', # 0xd5 'Cheng ', # 0xd6 'Zao ', # 0xd7 'Ji ', # 0xd8 'Li ', # 0xd9 'Peng ', # 0xda 'Yu ', # 0xdb 'Yu ', # 0xdc 'Gu ', # 0xdd 'Hun ', # 0xde 'Dong ', # 0xdf 'Tang ', # 0xe0 'Gang ', # 0xe1 'Wang ', # 0xe2 'Di ', # 0xe3 'Xi ', # 0xe4 'Fan ', # 0xe5 'Cheng ', # 0xe6 'Zhan ', # 0xe7 'Qi ', # 0xe8 
'Yuan ', # 0xe9 'Yan ', # 0xea 'Yu ', # 0xeb 'Quan ', # 0xec 'Yi ', # 0xed 'Sen ', # 0xee 'Ren ', # 0xef 'Chui ', # 0xf0 'Leng ', # 0xf1 'Qi ', # 0xf2 'Zhuo ', # 0xf3 'Fu ', # 0xf4 'Ke ', # 0xf5 'Lai ', # 0xf6 'Zou ', # 0xf7 'Zou ', # 0xf8 'Zhuo ', # 0xf9 'Guan ', # 0xfa 'Fen ', # 0xfb 'Fen ', # 0xfc 'Chen ', # 0xfd 'Qiong ', # 0xfe 'Nie ', # 0xff )
license: gpl-3.0 | hash: 1,463,020,589,174,493,700 | line_mean: 17.116279 | line_max: 21 | alpha_frac: 0.394737 | autogenerated: false

repo_name: MSA-Argentina/ojota | path: ojota/base.py | copies: 2 | size: 20415
content:
""" This file is part of Ojota. Ojota is free software: you can redistribute it and/or modify it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ojota is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Ojota. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import absolute_import from collections import MutableSequence from json import dumps from threading import current_thread import ojota.sources from ojota.sources import JSONSource from ojota.cache import Cache import six def current_data_code(data_code): """Sets the current data path.""" Ojota._data_codes[current_thread()] = data_code def get_current_data_code(): data_code = Ojota._data_codes.get(current_thread(), "") return data_code def set_data_source(data_path): ojota.sources._DATA_SOURCE = data_path def preload(*args): for arg in args: if hasattr(arg, "preload"): arg.preload() class Relation(object): """Adds a relation to another object.""" def __init__(self, attr_fk, to_class, related_name=None): """Constructor for the relation class Arguments: attr_fk -- a String with the foreign key attribute name to_class -- the class that the relation makes reference to related_name -- the name of the attribute for the backward relation Default None """ self.attr_fk = attr_fk self.to_class = to_class self.related_name = related_name def get_property(self): """Returns the property in which the relation will be referenced.""" def _inner(method_self): """Inner function to return the property for the relation.""" fk = getattr(method_self, self.attr_fk) return self.to_class.one(fk) ret = property(_inner) return ret def set_reversed_property(self, from_class): """Returns the property in which the backwards relation will be referenced.""" def _inner(method_self): """Inner function to return the property for the backwards relation.""" pk = method_self.primary_key params = {self.attr_fk: pk} return from_class.many(**params) if self.related_name: prop = property(_inner) setattr(self.to_class, self.related_name, prop) self.to_class.backwards_relations.append(self.related_name) class Callback(object): def __init__(self, field_name, function): self.field_name = field_name self.function = function def get_property(self): """Returns the property in which the relation will be referenced.""" def _inner(method_self): """Inner function to return the property for the relation.""" fk = getattr(method_self, self.field_name) return self.function(fk) ret = property(_inner) return ret class WSRelation(Relation): """Adds a relation to another object.""" def __init__(self, attr_fk, to_class, related_name=None, ws_call=None, plural_name=None): """Constructor for the relation class Arguments: attr_fk -- a String with the foreign key attribute name to_class -- the class that the relation makes reference to related_name -- the name of the attribute for the backward relation Default None ws_call -- the name of the webservice command plural_name -- basename of ws_call, only needed if plural_name should be changed. 
""" self.attr_fk = attr_fk self.to_class = to_class self.related_name = related_name self.ws_call = ws_call self.plural_name = plural_name def get_property(self): """Returns the property in which the relation will be referenced.""" def _ws_inner(method_self): """Inner function to return the property for the relation.""" _klass = self.to_class() self.__plural_name = _klass.__class__.plural_name self.__get_all_cmd = _klass.data_source.get_all_cmd if self.plural_name is not None: plural_name = self.plural_name else: plural_name = method_self.plural_name if not self.ws_call: _klass.data_source.get_all_cmd = self.ws_call ret = _klass.one(getattr(method_self, self.attr_fk)) else: _klass.__class__.plural_name = "/".join( (plural_name, getattr(method_self, self.attr_fk))) _klass.data_source.get_all_cmd = self.ws_call ret = _klass.many() _klass.__class__.plural_name = self.__plural_name _klass.data_source.get_all_cmd = self.__get_all_cmd return ret def _inner(method_self): """Inner function to return the property for the relation.""" fk = getattr(method_self, self.attr_fk) return self.to_class.one(fk) if self.ws_call is not None: ret = property(_ws_inner) else: ret = property(_inner) return ret class OjotaSet(MutableSequence): def __init__(self, ojota_class, data): super(OjotaSet, self).__init__() self._list = list(data) self.ojota_class = ojota_class def __len__(self): return len(self._list) def __getitem__(self, indexes): if isinstance(indexes, slice): list_ = self._list[indexes.start:indexes.stop:indexes.step] ret = OjotaSet(self.ojota_class, list_) else: ret = self.ojota_class(**self._list[indexes]) return ret def __delitem__(self, ii): del self._list[ii] def __setitem__(self, ii, val): raise NotImplementedError def __str__(self): return self.__repr__() def __repr__(self): return "OjotaSet containing %s %s" % (len(self._list), self.ojota_class.plural_name) def insert(self, ii, val): self._list.insert(ii, val) def append(self, val): list_idx = len(self._list) self.insert(list_idx, val) def many(self, **kwargs): elems = self.ojota_class._filter(self._list, kwargs) return OjotaSet(self.ojota_class, elems) def one(self, **kwargs): return self.ojota_class.one(**kwargs) class MetaOjota(type): """Metaclass for Ojota""" def __init__(self, *args, **kwargs): self.relations = {} self.backwards_relations = [] for attr, value in list(self.__dict__.items()): if isinstance(value, Relation): value.set_reversed_property(self) setattr(self, attr, value.get_property()) self.relations[value.attr_fk] = (value.to_class, attr, value) elif isinstance(value, Callback): setattr(self, attr, value.get_property()) return super(MetaOjota, self).__init__(*args, **kwargs) class Ojota(six.with_metaclass(MetaOjota, object)): """Base class to create instances of serialized data in the source files. 
""" _data_codes = {} plural_name = None data_in_root = True pk_field = "pk" required_fields = None cache = Cache() data_source = JSONSource() default_order = None queryset_type = OjotaSet prefilter = None cache_name = None @property def primary_key(self): """Returns the primary key value.""" return getattr(self, self.pk_field) @classmethod def get_plural_name(cls): if cls.plural_name is None: plural_name = "%ss" % cls.__name__ else: plural_name = cls.plural_name return plural_name def __init__(self, _pk=None, **kwargs): """Constructor.""" self.fields = [] if self.required_fields is None: self.required_fields = [] else: self.required_fields = list(self.required_fields) if self.pk_field not in self.required_fields: self.required_fields.append(self.pk_field) for key in self.required_fields: if key not in kwargs: raise AttributeError("The field '%s' is required" % key) for key, val in list(kwargs.items()): self.fields.append(key) setattr(self, key, val) @classmethod def get_current_data_code(cls): return get_current_data_code() @classmethod def get_cache_name(cls): if cls.cache_name is not None: cache_name = '_cache_' + cls.cache_name else: cache_name = '_cache_' + cls.get_plural_name() if not cls.data_in_root and get_current_data_code(): cache_name += '_' + get_current_data_code() return cache_name @classmethod def _read_all_from_datasource(cls): """Reads the data from the datasource, makes a dictionary with the key specified in the key parameter. Allows to filter by subdirectories when the data is not on the root according to the data path.""" cache_name = cls.get_cache_name() if cache_name not in cls.cache: elements = cls.data_source.fetch_elements(cls) if cls.prefilter is not None: elements_ = cls._filter(list(elements.values()), cls.prefilter) elements = {} for elem in elements_: elements[elem[cls.pk_field]] = elem cls.cache.set(name=cache_name, elems=elements) else: elements = cls.cache.get(cache_name) return elements @classmethod def _read_item_from_datasource(cls, pk): """Reads the data form the datasource if support index search.""" cache_name = cls.get_cache_name() element = cls.data_source.fetch_element(cls, pk) if cache_name in cls.cache: cache = cls.cache.get(cache_name) cache.update(element) cls.cache.set(name=cache_name, elems=cache) else: cache = element return cache[pk] @classmethod def _objetize(cls, data): """Return the data into an element.""" return cls.queryset_type(cls, data) @classmethod def _test_expression(cls, expression, value, element_data): """Finds out if a value in a given field matches an expression. Arguments: expression -- a string with the comparison expression. If the expression is a field name it will be compared with equal. In case that the field has "__" and an operation appended the it is compared with the appended expression. 
The availiable expressions allowed are: "=", "exact", "iexact", "contains", "icontains", "in", "gt", "gte", "lt", "lte", "startswith", "istartswith", "endswith", "iendswith", "range" and "ne" """ expression_parts = expression.split('__') if len(expression_parts) == 1: field = expression operation = '=' else: field, operation = expression_parts r = True try: if operation in ('=', 'exact'): r = element_data[field] == value elif operation == 'iexact': r = str(element_data[field]).lower() == str(value).lower() elif operation == 'contains': r = value in element_data[field] elif operation == 'icontains': r = str(value).lower() in str(element_data[field]).lower() elif operation == 'in': r = element_data[field] in value elif operation == 'gt': r = element_data[field] > value elif operation == 'gte': r = element_data[field] >= value elif operation == 'lt': r = element_data[field] < value elif operation == 'lte': r = element_data[field] <= value elif operation == 'startswith': r = str(element_data[field]).startswith(str(value)) elif operation == 'istartswith': r = str(element_data[field]).lower().startswith( str(value).lower()) elif operation == 'endswith': r = str(element_data[field]).endswith(str(value)) elif operation == 'iendswith': r = str(element_data[field]).lower().endswith( str(value).lower()) elif operation == 'range': r = value[0] <= element_data[field] <= value[1] elif operation == 'ne': r = element_data[field] != value else: raise AttributeError( "The operation %s does not exist" % operation) except KeyError: r = False # TODO date operations # TODO regex operations return r @classmethod def _filter(cls, data, filters): """Applies filter to data. Arguments: data -- an iterable containing the data filters -- a dictionary with the filters """ filtrados = [] for element_data in data: add = True for expression, value in list(filters.items()): if not cls._test_expression(expression, value, element_data): add = False break if add: filtrados.append(element_data) return filtrados @classmethod def _sort(cls, data_list, order_fields): """Sort a list by a given field or field froups. 
Arguments: data_list -- a list with the data order_fields -- a string with the order fields """ order_fields = [x.strip() for x in order_fields.split(',')] for order_field in reversed(order_fields): if order_field.startswith('-'): reverse = True order_field = order_field[1:] else: reverse = False def _key_func(item): elem_data = item.get(order_field, "") if elem_data is None: elem_data = "" return elem_data data_list = sorted(data_list, key=_key_func, reverse=reverse) return data_list @classmethod def all(cls): return cls.many() @classmethod def many(cls, **kargs): """Returns all the elements that match the conditions.""" elements = list(cls._read_all_from_datasource().values()) order_fields = cls.default_order if 'sorted' in kargs: order_fields = kargs['sorted'] del kargs['sorted'] if kargs: elements = cls._filter(elements, kargs) if order_fields: elements = cls._sort(elements, order_fields) list_ = cls._objetize(elements) return list_ @classmethod def one(cls, pk=None, **kargs): """Returns the first element that matches the conditions.""" element = None if pk is not None: kargs[cls.pk_field] = pk if list(kargs.keys()) == [cls.pk_field]: pk = kargs[cls.pk_field] if hasattr(cls.data_source, 'get_cmd'): elem = cls._read_item_from_datasource(pk) element = cls._objetize([elem])[0] else: all_elems = cls._read_all_from_datasource() if pk in all_elems: element = cls(**all_elems[pk]) else: result = cls.many(**kargs) if result: if len(result) > 1: raise IndexError("one is returning more than one element") else: element = result[0] return element @classmethod def first(cls, *args, **kwargs): elements = cls.many(*args, **kwargs) if elements is not None and len(elements): return elements[0] def __eq__(self, other): """Compare the equality of two elements.""" same_pk = self.primary_key == other.primary_key same_class = self.__class__ is other.__class__ return same_pk and same_class def __repr__(self): """String representation of the elements.""" return '%s<%s>' % (self.__class__.__name__, self.primary_key) def to_dict(self): return dict([(field, getattr(self, field)) for field in self.fields]) def to_json(self): return dumps(self.to_dict()) def update(self, **kwargs): """Updates the given values.""" for arg, value in list(kwargs.items()): if arg != self.pk_field: if arg not in self.fields: self.fields.append(arg) setattr(self, arg, value) self.dump_values() def dump_values(self, new_data=None, delete=False): """Saves the data into a file.""" elements = self.__class__.many() json_data = [] for element in elements: if element == self: if not delete: data = self.to_dict() else: data = None else: data = element.to_dict() if data is not None: json_data.append(data) if new_data is not None: json_data.append(new_data) self.data_source.save(self.__class__, json_data) cache_name = self.__class__.get_cache_name() self.cache.clear(cache_name) def delete(self): self.dump_values(delete=True) def save(self): """Save function for an object.""" ojota_fields = ("fields", "required_fields", "relations", "backwards_relations") data = self.__dict__ if all([field in list(data.keys()) for field in self.required_fields]): new_data = {} for attr_name, attr_value in list(data.items()): if attr_name not in ojota_fields: self.fields.append(attr_name) new_data[attr_name] = attr_value if self.__class__.one(self.primary_key) is None: self.dump_values(new_data) else: self.update(**new_data) @classmethod def preload(cls): cls.many() class OjotaHierarchy(Ojota): @property def segments(self): return self.primary_key.split(".") 
@property def root(self): return self.segments[0] @property def last_segment(self): return self.segments[-1] @property def parent(self): if len(self.segments) > 1: return self.one('.'.join(self.segments[:-1])) else: return None def is_parent(self, other): parent_id = '.'.join(self.segments[:-1]) return parent_id == other.primary_key def is_ancestor(self, other): if self.primary_key.startswith(other.primary_key): return True else: return False def is_sibling(self, other): return self.segments[:-1] == other.segments[:-1] def siblings(self): args = {"%s__startswith" % self.pk_field: self.parent.primary_key} elements = self.many(**args) list_ = [] for element in elements: if element.is_parent(self.parent): list_.append(element) return list_ def children(self): args = {"%s__startswith" % self.pk_field: self.primary_key} elements = self.many(**args) list_ = [] for element in elements: if element.is_parent(self): list_.append(element) return list_
license: lgpl-3.0 | hash: -6,583,260,627,071,240,000 | line_mean: 32.632619 | line_max: 79 | alpha_frac: 0.556454 | autogenerated: false

repo_name: CloudWareChile/OpenChile | path: openerp/addons/web_uservoice/__init__.py | copies: 9 | size: 1038
content:
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
license: agpl-3.0 | hash: -2,488,135,235,469,382,000 | line_mean: 46.181818 | line_max: 79 | alpha_frac: 0.61079 | autogenerated: false

repo_name: abhishekkrthakur/scikit-learn | path: benchmarks/bench_20newsgroups.py | copies: 377 | size: 3555
content:
from __future__ import print_function, division from time import time import argparse import numpy as np from sklearn.dummy import DummyClassifier from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.metrics import accuracy_score from sklearn.utils.validation import check_array from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB ESTIMATORS = { "dummy": DummyClassifier(), "random_forest": RandomForestClassifier(n_estimators=100, max_features="sqrt", min_samples_split=10), "extra_trees": ExtraTreesClassifier(n_estimators=100, max_features="sqrt", min_samples_split=10), "logistic_regression": LogisticRegression(), "naive_bayes": MultinomialNB(), "adaboost": AdaBoostClassifier(n_estimators=10), } ############################################################################### # Data if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-e', '--estimators', nargs="+", required=True, choices=ESTIMATORS) args = vars(parser.parse_args()) data_train = fetch_20newsgroups_vectorized(subset="train") data_test = fetch_20newsgroups_vectorized(subset="test") X_train = check_array(data_train.data, dtype=np.float32, accept_sparse="csc") X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr") y_train = data_train.target y_test = data_test.target print("20 newsgroups") print("=============") print("X_train.shape = {0}".format(X_train.shape)) print("X_train.format = {0}".format(X_train.format)) print("X_train.dtype = {0}".format(X_train.dtype)) print("X_train density = {0}" "".format(X_train.nnz / np.product(X_train.shape))) print("y_train {0}".format(y_train.shape)) print("X_test {0}".format(X_test.shape)) print("X_test.format = {0}".format(X_test.format)) print("X_test.dtype = {0}".format(X_test.dtype)) print("y_test {0}".format(y_test.shape)) print() print("Classifier Training") print("===================") accuracy, train_time, test_time = {}, {}, {} for name in sorted(args["estimators"]): clf = ESTIMATORS[name] try: clf.set_params(random_state=0) except (TypeError, ValueError): pass print("Training %s ... " % name, end="") t0 = time() clf.fit(X_train, y_train) train_time[name] = time() - t0 t0 = time() y_pred = clf.predict(X_test) test_time[name] = time() - t0 accuracy[name] = accuracy_score(y_test, y_pred) print("done") print() print("Classification performance:") print("===========================") print() print("%s %s %s %s" % ("Classifier ", "train-time", "test-time", "Accuracy")) print("-" * 44) for name in sorted(accuracy, key=accuracy.get): print("%s %s %s %s" % (name.ljust(16), ("%.4fs" % train_time[name]).center(10), ("%.4fs" % test_time[name]).center(10), ("%.4f" % accuracy[name]).center(10))) print()
license: bsd-3-clause | hash: 4,156,086,192,375,215,000 | line_mean: 35.649485 | line_max: 79 | alpha_frac: 0.562025 | autogenerated: false

repo_name: Tesora/tesora-tempest | path: tempest/lib/api_schema/response/compute/v2_1/security_groups.py | copies: 39 | size: 3473
content:
# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. common_security_group_rule = { 'from_port': {'type': ['integer', 'null']}, 'to_port': {'type': ['integer', 'null']}, 'group': { 'type': 'object', 'properties': { 'tenant_id': {'type': 'string'}, 'name': {'type': 'string'} }, 'additionalProperties': False, }, 'ip_protocol': {'type': ['string', 'null']}, # 'parent_group_id' can be UUID so defining it as 'string' also. 'parent_group_id': {'type': ['string', 'integer', 'null']}, 'ip_range': { 'type': 'object', 'properties': { 'cidr': {'type': 'string'} }, 'additionalProperties': False, # When optional argument is provided in request body # like 'group_id' then, attribute 'cidr' does not # comes in response body. So it is not 'required'. }, 'id': {'type': ['string', 'integer']} } common_security_group = { 'type': 'object', 'properties': { 'id': {'type': ['integer', 'string']}, 'name': {'type': 'string'}, 'tenant_id': {'type': 'string'}, 'rules': { 'type': 'array', 'items': { 'type': ['object', 'null'], 'properties': common_security_group_rule, 'additionalProperties': False, } }, 'description': {'type': 'string'}, }, 'additionalProperties': False, 'required': ['id', 'name', 'tenant_id', 'rules', 'description'], } list_security_groups = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'security_groups': { 'type': 'array', 'items': common_security_group } }, 'additionalProperties': False, 'required': ['security_groups'] } } get_security_group = create_security_group = update_security_group = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'security_group': common_security_group }, 'additionalProperties': False, 'required': ['security_group'] } } delete_security_group = { 'status_code': [202] } create_security_group_rule = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'security_group_rule': { 'type': 'object', 'properties': common_security_group_rule, 'additionalProperties': False, 'required': ['from_port', 'to_port', 'group', 'ip_protocol', 'parent_group_id', 'id', 'ip_range'] } }, 'additionalProperties': False, 'required': ['security_group_rule'] } } delete_security_group_rule = { 'status_code': [202] }
license: apache-2.0 | hash: 5,233,262,825,089,575,000 | line_mean: 29.734513 | line_max: 78 | alpha_frac: 0.530089 | autogenerated: false

repo_name: EvanK/ansible | path: lib/ansible/modules/files/assemble.py | copies: 12 | size: 8528
content:
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2012, Stephen Fromm <[email protected]> # Copyright: (c) 2016, Toshio Kuratomi <[email protected]> # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = r''' --- module: assemble short_description: Assemble configuration files from fragments description: - Assembles a configuration file from fragments. - Often a particular program will take a single configuration file and does not support a C(conf.d) style structure where it is easy to build up the configuration from multiple sources. C(assemble) will take a directory of files that can be local or have already been transferred to the system, and concatenate them together to produce a destination file. - Files are assembled in string sorting order. - Puppet calls this idea I(fragments). - This module is also supported for Windows targets. notes: - This module is also supported for Windows targets. version_added: '0.5' options: src: description: - An already existing directory full of source files. type: path required: true dest: description: - A file to create using the concatenation of all of the source files. type: path required: true backup: description: - Create a backup file (if C(yes)), including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. type: bool default: no delimiter: description: - A delimiter to separate the file contents. type: str version_added: '1.4' remote_src: description: - If C(no), it will search for src at originating/master machine. - If C(yes), it will go to the remote/target machine for the src. type: bool default: no version_added: '1.4' regexp: description: - Assemble files only if C(regex) matches the filename. - If not set, all files are assembled. - Every "\" (backslash) must be escaped as "\\" to comply to YAML syntax. - Uses L(Python regular expressions,http://docs.python.org/2/library/re.html). type: str ignore_hidden: description: - A boolean that controls if files that start with a '.' will be included or not. type: bool default: no version_added: '2.0' validate: description: - The validation command to run before copying into place. - The path to the file to validate is passed in via '%s' which must be present as in the sshd example below. - The command is passed securely so shell features like expansion and pipes won't work. 
type: str version_added: '2.0' seealso: - module: copy - module: template - module: win_copy author: - Stephen Fromm (@sfromm) extends_documentation_fragment: - decrypt - files ''' EXAMPLES = r''' - name: Assemble from fragments from a directory assemble: src: /etc/someapp/fragments dest: /etc/someapp/someapp.conf - name: Inserted provided delimiter in between each fragment assemble: src: /etc/someapp/fragments dest: /etc/someapp/someapp.conf delimiter: '### START FRAGMENT ###' - name: Assemble a new "sshd_config" file into place, after passing validation with sshd assemble: src: /etc/ssh/conf.d/ dest: /etc/ssh/sshd_config validate: /usr/sbin/sshd -t -f %s ''' import codecs import os import re import tempfile from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import b from ansible.module_utils._text import to_native def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir) tmp = os.fdopen(tmpfd, 'wb') delimit_me = False add_newline = False for f in sorted(os.listdir(src_path)): if compiled_regexp and not compiled_regexp.search(f): continue fragment = os.path.join(src_path, f) if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue with open(fragment, 'rb') as fragment_fh: fragment_content = fragment_fh.read() # always put a newline between fragments if the previous fragment didn't end with a newline. if add_newline: tmp.write(b('\n')) # delimiters should only appear between fragments if delimit_me: if delimiter: # un-escape anything like newlines delimiter = codecs.escape_decode(delimiter)[0] tmp.write(delimiter) # always make sure there's a newline after the # delimiter, so lines don't run together if delimiter[-1] != b('\n'): tmp.write(b('\n')) tmp.write(fragment_content) delimit_me = True if fragment_content.endswith(b('\n')): add_newline = False else: add_newline = True tmp.close() return temp_path def cleanup(path, result=None): # cleanup just in case if os.path.exists(path): try: os.remove(path) except (IOError, OSError) as e: # don't error on possible race conditions, but keep warning if result is not None: result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))] def main(): module = AnsibleModule( # not checking because of daisy chain to file module argument_spec=dict( src=dict(type='path', required=True), delimiter=dict(type='str'), dest=dict(type='path', required=True), backup=dict(type='bool', default=False), remote_src=dict(type='bool', default=False), regexp=dict(type='str'), ignore_hidden=dict(type='bool', default=False), validate=dict(type='str'), ), add_file_common_args=True, ) changed = False path_hash = None dest_hash = None src = module.params['src'] dest = module.params['dest'] backup = module.params['backup'] delimiter = module.params['delimiter'] regexp = module.params['regexp'] compiled_regexp = None ignore_hidden = module.params['ignore_hidden'] validate = module.params.get('validate', None) result = dict(src=src, dest=dest) if not os.path.exists(src): module.fail_json(msg="Source (%s) does not exist" % src) if not os.path.isdir(src): module.fail_json(msg="Source (%s) is not a directory" % src) if regexp is not None: try: compiled_regexp = re.compile(regexp) except re.error as e: module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp)) if validate and "%s" not in validate: 
module.fail_json(msg="validate must contain %%s: %s" % validate) path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir) path_hash = module.sha1(path) result['checksum'] = path_hash # Backwards compat. This won't return data if FIPS mode is active try: pathmd5 = module.md5(path) except ValueError: pathmd5 = None result['md5sum'] = pathmd5 if os.path.exists(dest): dest_hash = module.sha1(dest) if path_hash != dest_hash: if validate: (rc, out, err) = module.run_command(validate % path) result['validation'] = dict(rc=rc, stdout=out, stderr=err) if rc != 0: cleanup(path) module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err)) if backup and dest_hash is not None: result['backup_file'] = module.backup_local(dest) module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes']) changed = True cleanup(path, result) # handle file permissions file_args = module.load_file_common_arguments(module.params) result['changed'] = module.set_fs_attributes_if_different(file_args, changed) # Mission complete result['msg'] = "OK" module.exit_json(**result) if __name__ == '__main__': main()
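For orientation, the core helper above can also be exercised on its own, outside the Ansible module plumbing. A minimal sketch, assuming a hypothetical fragment directory /tmp/fragments and that this file's functions are importable; the delimiter is passed as bytes here because the temporary file is opened in binary mode, and the returned temp path is the caller's responsibility to move or clean up:

import re
import tempfile

temp_path = assemble_from_fragments(
    '/tmp/fragments',                          # hypothetical directory of fragment files
    delimiter=b'### START FRAGMENT ###',       # optional separator written between fragments
    compiled_regexp=re.compile(r'\.conf$'),    # only pick up *.conf fragments
    ignore_hidden=True,                        # skip dot-files
    tmpdir=tempfile.gettempdir(),              # where the temporary result file is created
)
with open(temp_path, 'rb') as fh:
    print(fh.read())                           # the concatenated configuration
cleanup(temp_path)                             # remove the temp file when done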
gpl-3.0
-4,841,225,055,742,347,000
31.8
112
0.640596
false
lupyuen/RaspberryPiImage
home/pi/GrovePi/Software/Python/others/temboo/Library/Amazon/SQS/AddPermission.py
4
5495
# -*- coding: utf-8 -*- ############################################################################### # # AddPermission # Adds a permission to a queue for a specific principal user. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class AddPermission(Choreography): def __init__(self, temboo_session): """ Create a new instance of the AddPermission Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(AddPermission, self).__init__(temboo_session, '/Library/Amazon/SQS/AddPermission') def new_input_set(self): return AddPermissionInputSet() def _make_result_set(self, result, path): return AddPermissionResultSet(result, path) def _make_execution(self, session, exec_id, path): return AddPermissionChoreographyExecution(session, exec_id, path) class AddPermissionInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the AddPermission Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_AWSAccessKeyId(self, value): """ Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.) """ super(AddPermissionInputSet, self)._set_input('AWSAccessKeyId', value) def set_AWSAccountId1(self, value): """ Set the value of the AWSAccountId1 input for this Choreo. ((required, integer) The AWS account number of the user that will be granted access to a specified action. Enter account number omitting any dashes.) """ super(AddPermissionInputSet, self)._set_input('AWSAccountId1', value) def set_AWSAccountId(self, value): """ Set the value of the AWSAccountId input for this Choreo. ((required, integer) The AWS account number of the queue owner. Enter account number omitting any dashes.) """ super(AddPermissionInputSet, self)._set_input('AWSAccountId', value) def set_AWSSecretKeyId(self, value): """ Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.) """ super(AddPermissionInputSet, self)._set_input('AWSSecretKeyId', value) def set_ActionName(self, value): """ Set the value of the ActionName input for this Choreo. ((required, string) The action to allow for a specified user. Valid values: SendMessage, ReceiveMessage, DeleteMessage,ChangeMessageVisibility, GetQueueAttributes.) """ super(AddPermissionInputSet, self)._set_input('ActionName', value) def set_Label(self, value): """ Set the value of the Label input for this Choreo. ((required, string) The unique identifier for the new permission that is being set.) 
""" super(AddPermissionInputSet, self)._set_input('Label', value) def set_QueueName(self, value): """ Set the value of the QueueName input for this Choreo. ((required, string) The name of the queue that you're granting access to.) """ super(AddPermissionInputSet, self)._set_input('QueueName', value) def set_ResponseFormat(self, value): """ Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".) """ super(AddPermissionInputSet, self)._set_input('ResponseFormat', value) def set_UserRegion(self, value): """ Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the SQS endpoint you wish to access. The default region is "us-east-1". See description below for valid values.) """ super(AddPermissionInputSet, self)._set_input('UserRegion', value) class AddPermissionResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the AddPermission Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.) """ return self._output.get('Response', None) class AddPermissionChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return AddPermissionResultSet(response, path)
apache-2.0
-732,031,863,467,823,600
44.040984
230
0.679345
false
nerzhul/ansible
lib/ansible/utils/module_docs_fragments/junos.py
40
3023
#
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport. The value of host is used as
        the destination address for the transport.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device. The port value will default to the well-known SSH port of 22
        (for C(transport=cli)) or port 830 (for C(transport=netconf)).
    required: false
    default: 22
  username:
    description:
      - Configures the username to use to authenticate the connection to
        the remote device. This value is used to authenticate
        the SSH session. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
    required: false
  password:
    description:
      - Specifies the password to use to authenticate the connection to
        the remote device. This value is used to authenticate
        the SSH session. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
    required: false
    default: null
  timeout:
    description:
      - Specifies the timeout in seconds for communicating with the network device
        for either connecting or sending commands. If the timeout is
        exceeded before the operation is completed, the module will error.
    required: false
    default: 10
  ssh_keyfile:
    description:
      - Specifies the SSH key to use to authenticate the connection to
        the remote device. This value is the path to the
        key used to authenticate the SSH session. If the value is not specified
        in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
        will be used instead.
    required: false
  provider:
    description:
      - Convenience method that allows all I(junos) arguments to be passed as
        a dict object. All constraints (required, choices, etc) must be
        met either by individual arguments or values in this dict.
    required: false
    default: null
"""
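Fragments like this are plain classes whose DOCUMENTATION string is merged into the documentation of modules that declare extends_documentation_fragment: junos. A quick, hedged way to inspect it programmatically (assumes the package layout shown in the path above and that PyYAML is available):

import yaml
from ansible.utils.module_docs_fragments.junos import ModuleDocFragment

options = yaml.safe_load(ModuleDocFragment.DOCUMENTATION)['options']
print(sorted(options))             # ['host', 'password', 'port', 'provider', 'ssh_keyfile', 'timeout', 'username']
print(options['port']['default'])  # 22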
gpl-3.0
1,338,705,183,339,291,000
38.25974
83
0.709229
false
gerrive/horizon
openstack_dashboard/dashboards/project/stacks/tests.py
6
40882
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import re from django.conf import settings from django.core import exceptions from django.core.urlresolvers import reverse from django import http from django.test.utils import override_settings # noqa from django.utils import html from mox3.mox import IsA # noqa import six from heatclient.common import template_format as hc_format from openstack_dashboard import api from openstack_dashboard.test import helpers as test from openstack_dashboard.dashboards.project.stacks import forms from openstack_dashboard.dashboards.project.stacks import mappings from openstack_dashboard.dashboards.project.stacks import tables INDEX_URL = reverse('horizon:project:stacks:index') DETAIL_URL = 'horizon:project:stacks:detail' class MockResource(object): def __init__(self, resource_type, physical_resource_id): self.resource_type = resource_type self.physical_resource_id = physical_resource_id class MappingsTests(test.TestCase): def test_mappings(self): def assertMappingUrl(url, resource_type, physical_resource_id): mock = MockResource(resource_type, physical_resource_id) mock_url = mappings.resource_to_url(mock) self.assertEqual(url, mock_url) assertMappingUrl( '/project/networks/subnets/aaa/detail', 'OS::Neutron::Subnet', 'aaa') assertMappingUrl( None, 'OS::Neutron::Subnet', None) assertMappingUrl( None, None, None) assertMappingUrl( None, 'AWS::AutoScaling::LaunchConfiguration', 'aaa') assertMappingUrl( '/project/instances/aaa/', 'AWS::EC2::Instance', 'aaa') assertMappingUrl( '/project/containers/aaa/', 'OS::Swift::Container', 'aaa') assertMappingUrl( None, 'Foo::Bar::Baz', 'aaa') assertMappingUrl( '/project/instances/aaa/', 'OS::Nova::Server', 'aaa') assertMappingUrl( '/project/stacks/stack/aaa/', 'OS::Heat::ResourceGroup', 'aaa') def test_stack_output(self): self.assertEqual(u'<pre>foo</pre>', mappings.stack_output('foo')) self.assertEqual(u'', mappings.stack_output(None)) outputs = ['one', 'two', 'three'] # On Python 3, the pretty JSON output doesn't add space before newline if six.PY3: expected_text = """[\n "one",\n "two",\n "three"\n]""" else: expected_text = """[\n "one", \n "two", \n "three"\n]""" self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text), mappings.stack_output(outputs)) outputs = {'foo': 'bar'} expected_text = """{\n "foo": "bar"\n}""" self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text), mappings.stack_output(outputs)) self.assertEqual( u'<a href="http://www.example.com/foo" target="_blank">' 'http://www.example.com/foo</a>', mappings.stack_output('http://www.example.com/foo')) class StackTests(test.TestCase): @override_settings(API_RESULT_PAGE_SIZE=2) @test.create_stubs({api.heat: ('stacks_list',)}) def test_index_paginated(self): stacks = self.stacks.list()[:5] api.heat.stacks_list(IsA(http.HttpRequest), marker=None, paginate=True, sort_dir='desc') \ .AndReturn([stacks, True, True]) api.heat.stacks_list(IsA(http.HttpRequest), marker=None, paginate=True, sort_dir='desc') \ .AndReturn([stacks[:2], True, True]) 
api.heat.stacks_list(IsA(http.HttpRequest), marker=stacks[2].id, paginate=True, sort_dir='desc') \ .AndReturn([stacks[2:4], True, True]) api.heat.stacks_list(IsA(http.HttpRequest), marker=stacks[4].id, paginate=True, sort_dir='desc') \ .AndReturn([stacks[4:], True, True]) self.mox.ReplayAll() url = reverse('horizon:project:stacks:index') res = self.client.get(url) # get all self.assertEqual(len(res.context['stacks_table'].data), len(stacks)) self.assertTemplateUsed(res, 'project/stacks/index.html') res = self.client.get(url) # get first page with 2 items self.assertEqual(len(res.context['stacks_table'].data), settings.API_RESULT_PAGE_SIZE) url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'), tables.StacksTable._meta.pagination_param, stacks[2].id) res = self.client.get(url) # get second page (items 2-4) self.assertEqual(len(res.context['stacks_table'].data), settings.API_RESULT_PAGE_SIZE) url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'), tables.StacksTable._meta.pagination_param, stacks[4].id) res = self.client.get(url) # get third page (item 5) self.assertEqual(len(res.context['stacks_table'].data), 1) @override_settings(API_RESULT_PAGE_SIZE=2) @test.create_stubs({api.heat: ('stacks_list',)}) def test_index_prev_paginated(self): stacks = self.stacks.list()[:3] api.heat.stacks_list(IsA(http.HttpRequest), marker=None, paginate=True, sort_dir='desc') \ .AndReturn([stacks, True, False]) api.heat.stacks_list(IsA(http.HttpRequest), marker=None, paginate=True, sort_dir='desc') \ .AndReturn([stacks[:2], True, True]) api.heat.stacks_list(IsA(http.HttpRequest), marker=stacks[2].id, paginate=True, sort_dir='desc') \ .AndReturn([stacks[2:], True, True]) api.heat.stacks_list(IsA(http.HttpRequest), marker=stacks[2].id, paginate=True, sort_dir='asc') \ .AndReturn([stacks[:2], True, True]) self.mox.ReplayAll() url = reverse('horizon:project:stacks:index') res = self.client.get(url) # get all self.assertEqual(len(res.context['stacks_table'].data), len(stacks)) self.assertTemplateUsed(res, 'project/stacks/index.html') res = self.client.get(url) # get first page with 2 items self.assertEqual(len(res.context['stacks_table'].data), settings.API_RESULT_PAGE_SIZE) url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'), tables.StacksTable._meta.pagination_param, stacks[2].id) res = self.client.get(url) # get second page (item 3) self.assertEqual(len(res.context['stacks_table'].data), 1) url = "%s?%s=%s" % (reverse('horizon:project:stacks:index'), tables.StacksTable._meta.prev_pagination_param, stacks[2].id) res = self.client.get(url) # prev back to get first page with 2 pages self.assertEqual(len(res.context['stacks_table'].data), settings.API_RESULT_PAGE_SIZE) @test.create_stubs({api.heat: ('stack_create', 'template_validate'), api.neutron: ('network_list_for_tenant', )}) def test_launch_stack(self): template = self.stack_templates.first() stack = self.stacks.first() api.heat.template_validate(IsA(http.HttpRequest), files={}, template=hc_format.parse(template.data)) \ .AndReturn(json.loads(template.validate)) api.heat.stack_create(IsA(http.HttpRequest), stack_name=stack.stack_name, timeout_mins=60, disable_rollback=True, template=None, parameters=IsA(dict), password='password', files=None) api.neutron.network_list_for_tenant(IsA(http.HttpRequest), self.tenant.id) \ .AndReturn(self.networks.list()) api.neutron.network_list_for_tenant(IsA(http.HttpRequest), self.tenant.id) \ .AndReturn(self.networks.list()) self.mox.ReplayAll() url = reverse('horizon:project:stacks:select_template') res = 
self.client.get(url) self.assertTemplateUsed(res, 'project/stacks/select_template.html') form_data = {'template_source': 'raw', 'template_data': template.data, 'method': forms.TemplateForm.__name__} res = self.client.post(url, form_data) self.assertTemplateUsed(res, 'project/stacks/create.html') url = reverse('horizon:project:stacks:launch') form_data = {'template_source': 'raw', 'template_data': template.data, 'password': 'password', 'parameters': template.validate, 'stack_name': stack.stack_name, "timeout_mins": 60, "disable_rollback": True, "__param_DBUsername": "admin", "__param_LinuxDistribution": "F17", "__param_InstanceType": "m1.small", "__param_KeyName": "test", "__param_DBPassword": "admin", "__param_DBRootPassword": "admin", "__param_DBName": "wordpress", "__param_Network": self.networks.list()[0]['id'], 'method': forms.CreateStackForm.__name__} res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, INDEX_URL) @test.create_stubs({api.heat: ('stack_create', 'template_validate'), api.neutron: ('network_list_for_tenant', )}) def test_launch_stack_with_environment(self): template = self.stack_templates.first() environment = self.stack_environments.first() stack = self.stacks.first() api.heat.template_validate(IsA(http.HttpRequest), files={}, template=hc_format.parse(template.data), environment=environment.data) \ .AndReturn(json.loads(template.validate)) api.heat.stack_create(IsA(http.HttpRequest), stack_name=stack.stack_name, timeout_mins=60, disable_rollback=True, template=None, environment=environment.data, parameters=IsA(dict), password='password', files=None) api.neutron.network_list_for_tenant(IsA(http.HttpRequest), self.tenant.id) \ .AndReturn(self.networks.list()) api.neutron.network_list_for_tenant(IsA(http.HttpRequest), self.tenant.id) \ .AndReturn(self.networks.list()) self.mox.ReplayAll() url = reverse('horizon:project:stacks:select_template') res = self.client.get(url) self.assertTemplateUsed(res, 'project/stacks/select_template.html') form_data = {'template_source': 'raw', 'template_data': template.data, 'environment_source': 'raw', 'environment_data': environment.data, 'method': forms.TemplateForm.__name__} res = self.client.post(url, form_data) self.assertTemplateUsed(res, 'project/stacks/create.html') url = reverse('horizon:project:stacks:launch') form_data = {'template_source': 'raw', 'template_data': template.data, 'environment_source': 'raw', 'environment_data': environment.data, 'password': 'password', 'parameters': template.validate, 'stack_name': stack.stack_name, "timeout_mins": 60, "disable_rollback": True, "__param_DBUsername": "admin", "__param_LinuxDistribution": "F17", "__param_InstanceType": "m1.small", "__param_KeyName": "test", "__param_DBPassword": "admin", "__param_DBRootPassword": "admin", "__param_DBName": "wordpress", "__param_Network": self.networks.list()[0]['id'], 'method': forms.CreateStackForm.__name__} res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, INDEX_URL) @test.create_stubs({api.heat: ('template_validate',)}) def test_launch_stack_with_hidden_parameters(self): template = { 'data': ('heat_template_version: 2013-05-23\n' 'parameters:\n' ' public_string:\n' ' type: string\n' ' secret_string:\n' ' type: string\n' ' hidden: true\n'), 'validate': { 'Description': 'No description', 'Parameters': { 'public_string': { 'Label': 'public_string', 'Description': '', 'Type': 'String', 'NoEcho': 'false' }, 'secret_string': { 'Label': 'secret_string', 'Description': '', 'Type': 'String', 'NoEcho': 'true' } } 
} } api.heat.template_validate(IsA(http.HttpRequest), files={}, template=hc_format.parse(template['data'])) \ .AndReturn(template['validate']) self.mox.ReplayAll() url = reverse('horizon:project:stacks:select_template') res = self.client.get(url) self.assertTemplateUsed(res, 'project/stacks/select_template.html') form_data = {'template_source': 'raw', 'template_data': template['data'], 'method': forms.TemplateForm.__name__} res = self.client.post(url, form_data) self.assertTemplateUsed(res, 'project/stacks/create.html') # ensure the fields were rendered correctly self.assertContains(res, '<input class="form-control" ' 'id="id___param_public_string" ' 'name="__param_public_string" ' 'type="text" />', html=True) self.assertContains(res, '<input class="form-control" ' 'id="id___param_secret_string" ' 'name="__param_secret_string" ' 'type="password" />', html=True) @test.create_stubs({api.heat: ('template_validate',)}) def test_launch_stack_with_parameter_group(self): template = { 'data': ('heat_template_version: 2013-05-23\n' 'parameters:\n' ' last_param:\n' ' type: string\n' ' first_param:\n' ' type: string\n' ' middle_param:\n' ' type: string\n' 'parameter_groups:\n' '- parameters:\n' ' - first_param\n' ' - middle_param\n' ' - last_param\n'), 'validate': { 'Description': 'No description', 'Parameters': { 'last_param': { 'Label': 'last_param', 'Description': '', 'Type': 'String', 'NoEcho': 'false' }, 'first_param': { 'Label': 'first_param', 'Description': '', 'Type': 'String', 'NoEcho': 'false' }, 'middle_param': { 'Label': 'middle_param', 'Description': '', 'Type': 'String', 'NoEcho': 'true' } }, 'ParameterGroups': [ { 'parameters': [ 'first_param', 'middle_param', 'last_param' ] } ] } } api.heat.template_validate(IsA(http.HttpRequest), files={}, template=hc_format.parse(template['data'])) \ .AndReturn(template['validate']) self.mox.ReplayAll() url = reverse('horizon:project:stacks:select_template') res = self.client.get(url) self.assertTemplateUsed(res, 'project/stacks/select_template.html') form_data = {'template_source': 'raw', 'template_data': template['data'], 'method': forms.TemplateForm.__name__} res = self.client.post(url, form_data) self.assertTemplateUsed(res, 'project/stacks/create.html') # ensure the fields were rendered in the correct order regex = re.compile('^.*>first_param<.*>middle_param<.*>last_param<.*$', flags=re.DOTALL) self.assertRegexpMatches(res.content.decode('utf-8'), regex) @test.create_stubs({api.heat: ('stack_create', 'template_validate')}) def test_launch_stack_parameter_types(self): template = { 'data': ('heat_template_version: 2013-05-23\n' 'parameters:\n' ' param1:\n' ' type: string\n' ' param2:\n' ' type: number\n' ' param3:\n' ' type: json\n' ' param4:\n' ' type: comma_delimited_list\n' ' param5:\n' ' type: boolean\n'), 'validate': { "Description": "No description", "Parameters": { "param1": { "Type": "String", "NoEcho": "false", "Description": "", "Label": "param1" }, "param2": { "Type": "Number", "NoEcho": "false", "Description": "", "Label": "param2" }, "param3": { "Type": "Json", "NoEcho": "false", "Description": "", "Label": "param3" }, "param4": { "Type": "CommaDelimitedList", "NoEcho": "false", "Description": "", "Label": "param4" }, "param5": { "Type": "Boolean", "NoEcho": "false", "Description": "", "Label": "param5" } } } } stack = self.stacks.first() api.heat.template_validate(IsA(http.HttpRequest), files={}, template=hc_format.parse(template['data'])) \ .AndReturn(template['validate']) api.heat.stack_create(IsA(http.HttpRequest), 
stack_name=stack.stack_name, timeout_mins=60, disable_rollback=True, template=hc_format.parse(template['data']), parameters={'param1': 'some string', 'param2': 42, 'param3': '{"key": "value"}', 'param4': 'a,b,c', 'param5': True}, password='password', files={}) self.mox.ReplayAll() url = reverse('horizon:project:stacks:select_template') res = self.client.get(url) self.assertTemplateUsed(res, 'project/stacks/select_template.html') form_data = {'template_source': 'raw', 'template_data': template['data'], 'method': forms.TemplateForm.__name__} res = self.client.post(url, form_data) self.assertTemplateUsed(res, 'project/stacks/create.html') # ensure the fields were rendered correctly self.assertContains(res, '<input class="form-control" ' 'id="id___param_param1" ' 'name="__param_param1" ' 'type="text" />', html=True) self.assertContains(res, '<input class="form-control" ' 'id="id___param_param2" ' 'name="__param_param2" ' 'type="number" />', html=True) self.assertContains(res, '<input class="form-control" ' 'id="id___param_param3" ' 'name="__param_param3" ' 'type="text" />', html=True) self.assertContains(res, '<input class="form-control" ' 'id="id___param_param4" ' 'name="__param_param4" ' 'type="text" />', html=True) self.assertContains(res, '<input id="id___param_param5" ' 'name="__param_param5" ' 'type="checkbox" />', html=True) # post some sample data and make sure it validates url = reverse('horizon:project:stacks:launch') form_data = {'template_source': 'raw', 'template_data': template['data'], 'password': 'password', 'parameters': json.dumps(template['validate']), 'stack_name': stack.stack_name, "timeout_mins": 60, "disable_rollback": True, "__param_param1": "some string", "__param_param2": 42, "__param_param3": '{"key": "value"}', "__param_param4": "a,b,c", "__param_param5": True, 'method': forms.CreateStackForm.__name__} res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, INDEX_URL) @test.create_stubs({api.heat: ('stack_update', 'stack_get', 'template_get', 'template_validate'), api.neutron: ('network_list_for_tenant', )}) def test_edit_stack_template(self): template = self.stack_templates.first() stack = self.stacks.first() # GET to template form api.heat.stack_get(IsA(http.HttpRequest), stack.id).AndReturn(stack) # POST template form, validation api.heat.template_validate(IsA(http.HttpRequest), files={}, template=hc_format.parse(template.data)) \ .AndReturn(json.loads(template.validate)) # GET to edit form api.heat.stack_get(IsA(http.HttpRequest), stack.id).AndReturn(stack) api.heat.template_get(IsA(http.HttpRequest), stack.id) \ .AndReturn(json.loads(template.validate)) # POST to edit form api.heat.stack_get(IsA(http.HttpRequest), stack.id).AndReturn(stack) fields = { 'stack_name': stack.stack_name, 'disable_rollback': True, 'timeout_mins': 61, 'password': 'password', 'template': None, 'parameters': IsA(dict), 'files': None } api.heat.stack_update(IsA(http.HttpRequest), stack_id=stack.id, **fields) api.neutron.network_list_for_tenant(IsA(http.HttpRequest), self.tenant.id) \ .AndReturn(self.networks.list()) self.mox.ReplayAll() url = reverse('horizon:project:stacks:change_template', args=[stack.id]) res = self.client.get(url) self.assertTemplateUsed(res, 'project/stacks/change_template.html') form_data = {'template_source': 'raw', 'template_data': template.data, 'method': forms.ChangeTemplateForm.__name__} res = self.client.post(url, form_data) url = reverse('horizon:project:stacks:edit_stack', args=[stack.id, ]) form_data = {'template_source': 'raw', 
'template_data': template.data, 'password': 'password', 'parameters': template.validate, 'stack_name': stack.stack_name, 'stack_id': stack.id, "timeout_mins": 61, "disable_rollback": True, "__param_DBUsername": "admin", "__param_LinuxDistribution": "F17", "__param_InstanceType": "m1.small", "__param_KeyName": "test", "__param_DBPassword": "admin", "__param_DBRootPassword": "admin", "__param_DBName": "wordpress", "__param_Network": self.networks.list()[0]['id'], 'method': forms.EditStackForm.__name__} res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, INDEX_URL) def test_launch_stack_form_invalid_name_digit(self): self._test_launch_stack_invalid_name('2_StartWithDigit') def test_launch_stack_form_invalid_name_underscore(self): self._test_launch_stack_invalid_name('_StartWithUnderscore') def test_launch_stack_form_invalid_name_point(self): self._test_launch_stack_invalid_name('.StartWithPoint') @test.create_stubs({api.neutron: ('network_list_for_tenant', )}) def _test_launch_stack_invalid_name(self, name): api.neutron.network_list_for_tenant(IsA(http.HttpRequest), self.tenant.id) \ .AndReturn(self.networks.list()) self.mox.ReplayAll() template = self.stack_templates.first() url = reverse('horizon:project:stacks:launch') form_data = {'template_source': 'raw', 'template_data': template.data, 'password': 'password', 'parameters': template.validate, 'stack_name': name, "timeout_mins": 60, "disable_rollback": True, "__param_DBUsername": "admin", "__param_LinuxDistribution": "F17", "__param_InstanceType": "m1.small", "__param_KeyName": "test", "__param_DBPassword": "admin", "__param_DBRootPassword": "admin", "__param_DBName": "wordpress", "__param_Network": self.networks.list()[0]['id'], 'method': forms.CreateStackForm.__name__} res = self.client.post(url, form_data) error = ('Name must start with a letter and may only contain letters, ' 'numbers, underscores, periods and hyphens.') self.assertFormErrors(res, 1) self.assertFormError(res, "form", 'stack_name', error) def _test_stack_action(self, action): stack = self.stacks.first() api.heat.stacks_list(IsA(http.HttpRequest), marker=None, paginate=True, sort_dir='desc') \ .AndReturn([self.stacks.list(), True, True]) getattr(api.heat, 'action_%s' % action)(IsA(http.HttpRequest), stack.id).AndReturn(stack) self.mox.ReplayAll() form_data = {"action": "stacks__%s__%s" % (action, stack.id)} res = self.client.post(INDEX_URL, form_data) self.assertNoFormErrors(res) self.assertRedirectsNoFollow(res, INDEX_URL) @test.create_stubs({api.heat: ('stacks_list', 'action_check',)}) def test_check_stack(self): self._test_stack_action('check') @test.create_stubs({api.heat: ('stacks_list', 'action_suspend',)}) def test_suspend_stack(self): self._test_stack_action('suspend') @test.create_stubs({api.heat: ('stacks_list', 'action_resume',)}) def test_resume_stack(self): self._test_stack_action('resume') @test.create_stubs({api.heat: ('stack_preview', 'template_validate')}) def test_preview_stack(self): template = self.stack_templates.first() stack = self.stacks.first() api.heat.template_validate(IsA(http.HttpRequest), files={}, template=hc_format.parse(template.data)) \ .AndReturn(json.loads(template.validate)) api.heat.stack_preview(IsA(http.HttpRequest), stack_name=stack.stack_name, timeout_mins=60, disable_rollback=True, template=None, parameters=IsA(dict), files=None).AndReturn(stack) self.mox.ReplayAll() url = reverse('horizon:project:stacks:preview_template') res = self.client.get(url) self.assertTemplateUsed(res, 
'project/stacks/preview_template.html') form_data = {'template_source': 'raw', 'template_data': template.data, 'method': forms.PreviewTemplateForm.__name__} res = self.client.post(url, form_data) self.assertTemplateUsed(res, 'project/stacks/preview.html') url = reverse('horizon:project:stacks:preview') form_data = {'template_source': 'raw', 'template_data': template.data, 'parameters': template.validate, 'stack_name': stack.stack_name, "timeout_mins": 60, "disable_rollback": True, "__param_DBUsername": "admin", "__param_LinuxDistribution": "F17", "__param_InstanceType": "m1.small", "__param_KeyName": "test", "__param_DBPassword": "admin", "__param_DBRootPassword": "admin", "__param_DBName": "wordpress", 'method': forms.PreviewStackForm.__name__} res = self.client.post(url, form_data) self.assertTemplateUsed(res, 'project/stacks/preview_details.html') self.assertEqual(res.context['stack_preview']['stack_name'], stack.stack_name) @test.create_stubs({api.heat: ('stack_get', 'template_get')}) def test_detail_stack_topology(self): stack = self.stacks.first() template = self.stack_templates.first() api.heat.stack_get(IsA(http.HttpRequest), stack.id) \ .MultipleTimes().AndReturn(stack) api.heat.template_get(IsA(http.HttpRequest), stack.id) \ .AndReturn(json.loads(template.validate)) self.mox.ReplayAll() url = '?'.join([reverse(DETAIL_URL, args=[stack.id]), '='.join(['tab', 'stack_details__stack_topology'])]) res = self.client.get(url) tab = res.context['tab_group'].get_tab('topology') d3_data = tab.data['d3_data'] self.assertEqual(tab.template_name, 'project/stacks/_detail_topology.html') # status is CREATE_COMPLETE, so we expect the topology to display it self.assertIn('info_box', d3_data) self.assertIn('stack-green.svg', d3_data) self.assertIn('Create Complete', d3_data) @test.create_stubs({api.heat: ('stack_get', 'template_get')}) def test_detail_stack_overview(self): stack = self.stacks.first() template = self.stack_templates.first() api.heat.stack_get(IsA(http.HttpRequest), stack.id) \ .MultipleTimes().AndReturn(stack) api.heat.template_get(IsA(http.HttpRequest), stack.id) \ .AndReturn(json.loads(template.validate)) self.mox.ReplayAll() url = '?'.join([reverse(DETAIL_URL, args=[stack.id]), '='.join(['tab', 'stack_details__stack_overview'])]) res = self.client.get(url) tab = res.context['tab_group'].get_tab('overview') overview_data = tab.data['stack'] self.assertEqual(tab.template_name, 'project/stacks/_detail_overview.html') self.assertEqual(stack.stack_name, overview_data.stack_name) @test.create_stubs({api.heat: ('stack_get', 'template_get')}) def test_detail_stack_resources(self): stack = self.stacks.first() template = self.stack_templates.first() api.heat.stack_get(IsA(http.HttpRequest), stack.id) \ .MultipleTimes().AndReturn(stack) api.heat.template_get(IsA(http.HttpRequest), stack.id) \ .AndReturn(json.loads(template.validate)) self.mox.ReplayAll() url = '?'.join([reverse(DETAIL_URL, args=[stack.id]), '='.join(['tab', 'stack_details__resource_overview'])]) res = self.client.get(url) tab = res.context['tab_group'].get_tab('resources') self.assertEqual(tab.template_name, 'project/stacks/_detail_resources.html') @test.create_stubs({api.heat: ('stack_get', 'template_get')}) def test_detail_stack_template(self): stack = self.stacks.first() template = self.stack_templates.first() api.heat.stack_get(IsA(http.HttpRequest), stack.id) \ .AndReturn(stack) api.heat.template_get(IsA(http.HttpRequest), stack.id) \ .AndReturn(json.loads(template.validate)) self.mox.ReplayAll() url = 
'?'.join([reverse(DETAIL_URL, args=[stack.id]), '='.join(['tab', 'stack_details__stack_template'])]) res = self.client.get(url) tab = res.context['tab_group'].get_tab('stack_template') template_data = tab.data['stack_template'] self.assertEqual(tab.template_name, 'project/stacks/_stack_template.html') self.assertIn(json.loads(template.validate)['Description'], template_data) @test.create_stubs({api.heat: ('resource_get', 'resource_metadata_get')}) def test_resource_view(self): stack = self.stacks.first() resource = self.heat_resources.first() metadata = {} api.heat.resource_get( IsA(http.HttpRequest), stack.id, resource.resource_name) \ .AndReturn(resource) api.heat.resource_metadata_get( IsA(http.HttpRequest), stack.id, resource.resource_name) \ .AndReturn(metadata) self.mox.ReplayAll() url = reverse('horizon:project:stacks:resource', args=[stack.id, resource.resource_name]) res = self.client.get(url) self.assertTemplateUsed(res, 'horizon/common/_detail.html') self.assertTemplateUsed(res, 'project/stacks/_resource_overview.html') self.assertEqual(res.context['resource'].logical_resource_id, resource.logical_resource_id) class TemplateFormTests(test.TestCase): class SimpleFile(object): def __init__(self, name, data): self.name = name self.data = data def read(self): return self.data def test_create_upload_form_attributes(self): attrs = forms.create_upload_form_attributes( 'env', 'url', 'Environment') self.assertEqual(attrs['data-envsource-url'], 'Environment') def test_clean_file_upload_form_url(self): kwargs = {'next_view': 'Launch Stack'} t = forms.TemplateForm({}, **kwargs) precleaned = { 'template_url': 'http://templateurl.com', } t.clean_uploaded_files('template', 'template', precleaned, {}) self.assertEqual(precleaned['template_url'], 'http://templateurl.com') def test_clean_file_upload_form_multiple(self): kwargs = {'next_view': 'Launch Stack'} t = forms.TemplateForm({}, **kwargs) precleaned = { 'template_url': 'http://templateurl.com', 'template_data': 'http://templateurl.com', } self.assertRaises( exceptions.ValidationError, t.clean_uploaded_files, 'template', 'template', precleaned, {}) def test_clean_file_upload_form_invalid_json(self): kwargs = {'next_view': 'Launch Stack'} t = forms.TemplateForm({}, **kwargs) precleaned = { 'template_data': 'http://templateurl.com', } json_str = '{notvalidjson::::::json/////json' files = {'template_upload': self.SimpleFile('template_name', json_str)} self.assertRaises( exceptions.ValidationError, t.clean_uploaded_files, 'template', 'template', precleaned, files) def test_clean_file_upload_form_valid_data(self): kwargs = {'next_view': 'Launch Stack'} t = forms.TemplateForm({}, **kwargs) precleaned = { 'template_data': 'http://templateurl.com', } json_str = '{"isvalid":"json"}' files = {'template_upload': self.SimpleFile('template_name', json_str)} t.clean_uploaded_files('template', 'template', precleaned, files) self.assertEqual( json_str, precleaned['template_data'])
apache-2.0
-2,433,564,267,593,068,000
40.801636
80
0.491292
false
andreif/blues
blues/python.py
3
1054
""" Python Blueprint ================ Does not install python itself, only develop and setup tools. Contains pip helper for other blueprints to use. **Fabric environment:** .. code-block:: yaml blueprints: - blues.python """ from fabric.decorators import task from refabric.api import run, info from refabric.context_managers import sudo from . import debian __all__ = ['setup'] pip_log_file = '/tmp/pip.log' @task def setup(): """ Install python develop tools """ install() def install(): with sudo(): info('Install python dependencies') debian.apt_get('install', 'python-dev', 'python-setuptools') run('easy_install pip') run('touch {}'.format(pip_log_file)) debian.chmod(pip_log_file, mode=777) pip('install', 'setuptools', '--upgrade') def pip(command, *options): info('Running pip {}', command) # TODO: change pip log location, per env? per user? run('pip {0} {1} -v --log={2} --log-file={2}'.format(command, ' '.join(options), pip_log_file))
mit
1,132,598,305,589,506,800
20.08
99
0.624288
false
Jimdo/ansible-modules-core
cloud/amazon/rds_param_group.py
10
10698
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: rds_param_group version_added: "1.5" short_description: manage RDS parameter groups description: - Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5. options: state: description: - Specifies whether the group should be present or absent. required: true default: present aliases: [] choices: [ 'present' , 'absent' ] name: description: - Database parameter group identifier. required: true default: null aliases: [] description: description: - Database parameter group description. Only set when a new group is added. required: false default: null aliases: [] engine: description: - The type of database for this group. Required for state=present. required: false default: null aliases: [] choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'postgres9.4', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0'] immediate: description: - Whether to apply the changes immediately, or after the next reboot of any associated instances. required: false default: null aliases: [] params: description: - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group. required: false default: null aliases: [] choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'postgres9.4', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0'] region: description: - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. 
required: true default: null aliases: ['aws_region', 'ec2_region'] author: Scott Anderson extends_documentation_fragment: aws ''' EXAMPLES = ''' # Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 - rds_param_group: state: present name: norwegian_blue description: 'My Fancy Ex Parrot Group' engine: 'mysql5.6' params: auto_increment_increment: "42K" # Remove a parameter group - rds_param_group: state: absent name: norwegian_blue ''' VALID_ENGINES = [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'postgres9.4', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0', ] try: import boto.rds from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False # returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group) class NotModifiableError(StandardError): def __init__(self, error_message, *args): super(NotModifiableError, self).__init__(error_message, *args) self.error_message = error_message def __repr__(self): return 'NotModifiableError: %s' % self.error_message def __str__(self): return 'NotModifiableError: %s' % self.error_message INT_MODIFIERS = { 'K': 1024, 'M': pow(1024, 2), 'G': pow(1024, 3), 'T': pow(1024, 4), } TRUE_VALUES = ('on', 'true', 'yes', '1',) def set_parameter(param, value, immediate): """ Allows setting parameters with 10M = 10* 1024 * 1024 and so on. """ converted_value = value if param.type == 'string': converted_value = str(value) elif param.type == 'integer': if isinstance(value, basestring): try: for modifier in INT_MODIFIERS.keys(): if value.endswith(modifier): converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] converted_value = int(converted_value) except ValueError: # may be based on a variable (ie. {foo*3/4}) so # just pass it on through to boto converted_value = str(value) elif type(value) == bool: converted_value = 1 if value else 0 else: converted_value = int(value) elif param.type == 'boolean': if isinstance(value, basestring): converted_value = value in TRUE_VALUES else: converted_value = bool(value) param.value = converted_value param.apply(immediate) def modify_group(group, params, immediate=False): """ Set all of the params in a group to the provided new params. Raises NotModifiableError if any of the params to be changed are read only. """ changed = {} new_params = dict(params) for key in new_params.keys(): if group.has_key(key): param = group[key] new_value = new_params[key] try: old_value = param.value except ValueError: # some versions of boto have problems with retrieving # integer values from params that may have their value # based on a variable (ie. {foo*3/4}), so grab it in a # way that bypasses the property functions old_value = param._value if old_value != new_value: if not param.is_modifiable: raise NotModifiableError('Parameter %s is not modifiable.' 
% key) changed[key] = {'old': param.value, 'new': new_value} set_parameter(param, new_value, immediate) del new_params[key] return changed, new_params def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state = dict(required=True, choices=['present', 'absent']), name = dict(required=True), engine = dict(required=False, choices=VALID_ENGINES), description = dict(required=False), params = dict(required=False, aliases=['parameters'], type='dict'), immediate = dict(required=False, type='bool'), ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') state = module.params.get('state') group_name = module.params.get('name').lower() group_engine = module.params.get('engine') group_description = module.params.get('description') group_params = module.params.get('params') or {} immediate = module.params.get('immediate') or False if state == 'present': for required in ['name', 'description', 'engine', 'params']: if not module.params.get(required): module.fail_json(msg = str("Parameter %s required for state='present'" % required)) else: for not_allowed in ['description', 'engine', 'params']: if module.params.get(not_allowed): module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) # Retrieve any AWS settings from the environment. region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) if not region: module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) try: conn = boto.rds.connect_to_region(region, **aws_connect_kwargs) except boto.exception.BotoServerError, e: module.fail_json(msg = e.error_message) group_was_added = False try: changed = False try: all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100) exists = len(all_groups) > 0 except BotoServerError, e: if e.error_code != 'DBParameterGroupNotFound': module.fail_json(msg = e.error_message) exists = False if state == 'absent': if exists: conn.delete_parameter_group(group_name) changed = True else: changed = {} if not exists: new_group = conn.create_parameter_group(group_name, engine=group_engine, description=group_description) group_was_added = True # If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only # if there are parameters left to set. marker = None while len(group_params): next_group = conn.get_all_dbparameters(group_name, marker=marker) changed_params, group_params = modify_group(next_group, group_params, immediate) changed.update(changed_params) if hasattr(next_group, 'Marker'): marker = next_group.Marker else: break except BotoServerError, e: module.fail_json(msg = e.error_message) except NotModifiableError, e: msg = e.error_message if group_was_added: msg = '%s The group "%s" was added first.' % (msg, group_name) module.fail_json(msg=msg) module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * main()
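The K/M/G/T suffix handling in set_parameter() is easiest to see with a concrete value: for the documented example params of auto_increment_increment: "42K", the integer branch strips the trailing K and multiplies by INT_MODIFIERS['K'], so the value sent to RDS is 42 * 1024 = 43008. A standalone sketch of just that expansion (the real code path additionally works on a boto parameter object and its declared type):

INT_MODIFIERS = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}

def expand(value):
    """Mirror of the suffix expansion applied to integer parameters."""
    for modifier, factor in INT_MODIFIERS.items():
        if value.endswith(modifier):
            return int(value[:-1]) * factor
    return int(value)

print(expand('42K'))    # 43008
print(expand('10M'))    # 10485760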
gpl-3.0
1,912,550,408,965,219,300
34.190789
305
0.609647
false
KrzysztofStachanczyk/Sensors-WWW-website
www/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py
2930
11275
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # The following result for thai was collected from a limited sample (1M). # Character Mapping Table: TIS620CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40 188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50 253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70 209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222, 223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235, 236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57, 49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54, 45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63, 22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244, 11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247, 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, ) # Model Table: # total sequences: 100% # first 512 sequences: 92.6386% # first 1024 sequences:7.3177% # rest sequences: 1.0230% # negative sequences: 0.0436% ThaiLangModel = ( 0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3, 0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2, 3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3, 0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2, 3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2, 3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1, 3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1, 3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1, 2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1, 3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1, 
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1, 0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2, 1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3, 3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0, 1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2, 0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3, 0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0, 3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1, 2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2, 0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2, 3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, 3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0, 2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2, 3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1, 2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1, 3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0, 3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1, 3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1, 3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1, 1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2, 0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3, 0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1, 3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0, 3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1, 1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0, 3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1, 3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2, 0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0, 0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0, 1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1, 1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1, 3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1, 0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0, 
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0, 3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0, 0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1, 0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1, 0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1, 0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0, 0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1, 0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0, 0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0, 0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0, 3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1, 2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1, 0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0, 3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0, 1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0, 1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0, 1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) TIS620ThaiModel = { 'charToOrderMap': TIS620CharToOrderMap, 'precedenceMatrix': ThaiLangModel, 'mTypicalPositiveRatio': 0.926386, 'keepEnglishLetter': False, 'charsetName': "TIS-620" } # flake8: noqa
gpl-3.0
4,566,932,265,352,882,700
55.375
73
0.540931
false
opatut/mini
mini/filters.py
1
4898
from mini import app from mini.util import AnonymousUser from mini.models import User, Email from datetime import datetime as dt, date as d, timedelta as td from flask import Markup import time, os, pygments, pygments.lexers, pygments.formatters, git, re from os.path import * from json import dumps # UTILITY to wrap a date/time in a span tag with a title def date_title(s, fmt, with_title=True): if with_title: return Markup("<span title=\"%s UTC\">%s</span>" % (datetime(s, False), fmt)) return fmt # shorten a hexsha @app.template_filter() def shortsha(s): return s[:10] # shorten a string @app.template_filter() def shorten(s, length): return s[:length] + ("..." if len(s) > length else "") # shorten a string @app.template_filter() def first_line(s): return s.splitlines()[0] @app.template_filter() def splitlines(s): return s.splitlines() @app.template_filter() def parentpath(s): return normpath(join(s, "..")) @app.template_filter() def filesize(s): s = int(s) for x in ['B', 'KB','MB','GB','TB']: if s < 1024.0: return "%3.f %s" % (s, x) s /= 1024.0 # convert unix timestamp to datetime @app.template_filter() def git_committer_time(commit): return dt.fromtimestamp(commit.committed_date + commit.committer_tz_offset) # find a user @app.template_filter() def git_user(u): mail = Email.query.filter_by(email=u.email).first() if not mail: return AnonymousUser(u.name.encode("utf-8").decode("utf-8"), u.email) return mail.user # format a timestamp in default time format (00:00:00) @app.template_filter() def time(s, with_title=True, small=False): return date_title(s, s.strftime("%H:%M" + ("" if small else ":%S")), with_title) # format a timestamp in default date format (0000-00-00) @app.template_filter() def date(s, with_title=True): return date_title(s, s.strftime("%Y-%m-%d"), with_title) # format a timestamp in default format (0000-00-00 00:00:00) @app.template_filter() def datetime(s, with_title=True): return date_title(s, s.strftime("%Y-%m-%d %H:%M:%S"), with_title) # format a timestamp as human readable date @app.template_filter() def date_human(s, with_title=True, capitalize=False, always_format=False): if not always_format and dt.utcnow().date() == s.date(): val = "today" elif not always_format and dt.utcnow().date() == s.date() - td(days=1): val = "yesterday" else: val = s.strftime("%B %d, %Y") if capitalize: val = val.capitalize() return date_title(s, val, with_title) @app.template_filter() def date_nice(s): return s.strftime("%b %d, %Y") @app.template_filter() def js_date(s): return s.strftime("Date.UTC(%Y, %m-1, %d)") # in JS, months start at 0 @app.template_filter() def json(s): return dumps(s) @app.template_filter() def filetype(blob): if type(blob) == git.Tree: return "folder" if blob.size == 0: return "empty-file" ext = extension(blob) mimetype = blob.mime_type IMAGE_TYPES = ("png", "jpg", "jpeg", "tga", "gif", "bmp") if ext in IMAGE_TYPES: return "image" if mimetype.split("/")[0] == "text": return "textfile" return "file" @app.template_filter() def extension(file): return splitext(file.name)[1][1:].lower() @app.template_filter() def pathsplit(pathstr, maxsplit=None): """split relative path into list""" path = [pathstr] while True: oldpath = path[:] path[:1] = list(os.path.split(path[0])) if path[0] == '': path = path[1:] elif path[1] == '': path = path[:1] + path[2:] if path == oldpath: return path if maxsplit is not None and len(path) > maxsplit: return path @app.template_filter() def highlightsheet(s): return pygments.formatters.HtmlFormatter(style = s).get_style_defs('.highlight') 
@app.template_filter() def highlight(s, filename): s = s.strip() try: lexer = pygments.lexers.get_lexer_for_filename(filename) except pygments.util.ClassNotFound: lexer = pygments.lexers.TextLexer() formatter = pygments.formatters.HtmlFormatter(linenos = True) return Markup(pygments.highlight(s, lexer, formatter)) @app.template_filter() def find_readme(tree): for x in tree.blobs: if x.name.upper().startswith("README"): return x @app.template_filter() def diffLineType(line): if line[:3] == "---": return "from" elif line[:3] == "+++": return "to" elif line[:2] == "@@": return "section" elif line[:1] == "-": return "deletion" elif line[:1] == "+": return "insertion" return "context" @app.template_filter() def diffParseSection(line): m = re.search('^@@\s*-([0-9]+),[0-9]+\s+\+([0-9]+),[0-9]+\s*@@.*$', line) return (int(m.group(1)), int(m.group(2)))
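# A few illustrative results for the filters above (computed by hand; the inputs are made up):
#
#   pathsplit("static/js/app.js")                        -> ['static', 'js', 'app.js']
#   filesize("2048")                                     -> '  2 KB'
#   shortsha("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3") -> 'a94a8fe5cc'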
gpl-3.0
-3,074,237,716,313,656,000
26.363128
96
0.621886
false
jangorecki/h2o-3
py2/testdir_single_jvm/test_GBMGrid_basic_many.py
20
3838
import unittest, sys, time sys.path.extend(['.','..','../..','py']) import h2o2 as h2o import h2o_cmd, h2o_import as h2i from h2o_test import dump_json, verboseprint, OutputObj import h2o_jobs DO_CLASSIFICATION = True DO_FAIL_CASE = False DO_FROM_TO_STEP = False class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init(1, java_heap_GB=4) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_GBMGrid_basic_many(self): trainFilename = 'prostate.csv' train_key = 'prostate.hex' timeoutSecs = 300 csvPathname = "logreg/" + trainFilename parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=train_key, schema='put') pA = h2o_cmd.ParseObj(parseResult) iA = h2o_cmd.InspectObj(pA.parse_key) parse_key = pA.parse_key numRows = iA.numRows numCols = iA.numCols labelList = iA.labelList labelListUsed = list(labelList) numColsUsed = numCols parameters = { 'validation_frame': train_key, 'ignored_columns': "['ID']", # this has to have [] 'response_column': 'CAPSULE', # 'balance_classes': # 'max_after_balance_size': # ?? # 'ntrees': '[8, 10]', 'ntrees': 8, # 'max_depth': '[8, 9]', 'max_depth': 8, # ?? # 'min_rows': '[1, 2]', 'min_rows': 1, 'nbins': 40, # ?? # 'learn_rate': "[0.1, 0.2]", 'learn_rate': 0.1, # FIX! doesn't like it? # 'loss': 'Bernoulli', # FIX..no variable importance for GBM yet? # 'variable_importance': False, # 'seed': } jobs = [] # kick off 5 of these GBM grid jobs, with different tree choices start = time.time() totalGBMGridJobs = 0 for i in range(5): modelKey = 'GBMGrid_prostate_%s', i bmResult = h2o.n0.build_model( algo='gbm', model_id=modelKey, training_frame=parse_key, parameters=parameters, timeoutSecs=60) bm = OutputObj(bmResult, 'bm') print "GBMResult:", h2o.dump_json(bm) # FIX! is this right for gridded? job_key = bm.jobs[0].key.name # FIX! this isn't a full formed name (%) model_key = bm.jobs[0].dest.name jobs.append( (job_key, model_key) ) totalGBMGridJobs += 1 h2o_jobs.pollWaitJobs(timeoutSecs=300) elapsed = time.time() - start print "All GBM jobs completed in", elapsed, "seconds." print "totalGBMGridJobs:", totalGBMGridJobs for job_key, model_key in jobs: modelResult = h2o.n0.models(key=model_key) model = OutputObj(modelResult['models'][0]['output'], 'model') cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60) cmm = OutputObj(cmmResult, 'cmm') print "\nLook!, can use dot notation: cmm.cm.confusion.matrix", cmm.cm.confusion_matrix, "\n" mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60) mmResultShort = mmResult['model_metrics'][0] del mmResultShort['frame'] # too much! mm = OutputObj(mmResultShort, 'mm') prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60) pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr') # too slow! # h2o_cmd.runStoreView() if __name__ == '__main__': h2o.unit_main()
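# Illustrative sketch only (not part of the original test): the loop above intends one distinct
# model id per grid job, which the in-line "FIX!" comment hints should be built with string
# formatting, for example:
#
#     model_id = 'GBMGrid_prostate_%d' % i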
apache-2.0
616,914,211,153,584,900
32.666667
109
0.544294
false
epfl-cosmo/lammps
tools/moltemplate/moltemplate/nbody_by_type_lib.py
8
22855
#!/usr/bin/env python # Author: Andrew Jewett (jewett.aij at g mail) # http://www.chem.ucsb.edu/~sheagroup # License: 3-clause BSD License (See LICENSE.TXT) # Copyright (c) 2012, Regents of the University of California # All rights reserved. import sys from collections import defaultdict #from collections import namedtuple if sys.version < '2.7': sys.stderr.write('--------------------------------------------------------\n' '----------------- WARNING: OLD PYTHON VERSION ----------\n' ' This program is untested on your python version (' + sys.version + ').\n' ' PLEASE LET ME KNOW IF THIS PROGRAM CRASHES (and upgrade python).\n' ' -Andrew 2013-10-25\n' '--------------------------------------------------------\n' '--------------------------------------------------------\n') from ordereddict import OrderedDict else: from collections import OrderedDict from collections import defaultdict try: from .nbody_graph_search import Ugraph, GraphMatcher from .ttree_lex import MatchesPattern, MatchesAll, InputError except (SystemError, ValueError): # not installed as a package from nbody_graph_search import Ugraph, GraphMatcher from ttree_lex import MatchesPattern, MatchesAll, InputError #import gc def GenInteractions_int(G_system, g_bond_pattern, typepattern_to_coefftypes, canonical_order, # function to sort atoms and bonds atomtypes_int2str, bondtypes_int2str, report_progress=False, # print messages to sys.stderr? check_undefined_atomids_str = None): """ GenInteractions() automatically determines a list of interactions present in a system of bonded atoms (argument "G_system"), which satisfy the bond topology present in "g_bond_pattern", and satisfy the atom and bond type requirements in "typepattern_to_coefftypes". Whenever a set of atoms in "G_system" are bonded together in a way which matches "g_bond_pattern", and when the atom and bond types is consistent with one of the entries in "typepattern_to_coefftypes", the corresponding list of atoms from G_system is appended to the list of results. These results (the list of lists of atoms participating in an interaction) are organized according their corresponding "coefftype", a string which identifies the type of interaction they obey as explained above. results are returned as a dictionary using "coefftype" as the lookup key. Arguments: -- typepattern_to_coefftypes is a list of 2-tuples -- The first element of the 2-tuple is the "typepattern". It contains a string describing a list of atom types and bond types. The typepattern is associated with a "coefftype", which is the second element of the 2-tuple. This is a string which identifies the type of interaction between the atoms. Later on, this string can be used to lookup the force field parameters for this interaction elsewhere.) -- Arguments: G_system, g_bond_pattern, atomtypes_int2str, bondtypes_int2str -- G_system stores a list of atoms and bonds, and their attributes in "Ugraph" format. In this format: Atom ID numbers are represented by indices into the G_system.verts[] list. Bond ID numbers are represented by indices into the G_system.edges[] list. Atom types are represented as integers in the G_system.verts[i].attr list. Bond types are represented as integers in the G_system.edges[i].attr list. They are converted into strings using atomtypes_int2str, and bondtypes_int2str. g_bond_pattern is a graph which specifies the type of bonding between the atoms required for a match. It is in Ugraph format (however the atom and bond types are left blank.) 
Atom and bond types are supplied by the user in string format. (These strings typically encode integers, but could be any string in principle.) The string-version of the ith atom type is stored in atomtypes_int2str[ G_system.verts[i].attr ] The string-version of the ith bond type is stored in bondtypes_int2str[ G_system.edges[i].attr ] -- The "canonical_order" argument: -- The search for atoms with a given bond pattern often yields redundant matches. There is no difference for example between the angle formed between three consecutively bonded atoms (named, 1, 2, 3, for example), and the angle between the same atoms in reverse order (3, 2, 1). However both triplets of atoms will be returned by the subgraph- matching algorithm when searching for ALL 3-body interactions.) To eliminate this redundancy, the caller must supply a "canonical_order" argument. This is a function which sorts the atoms and bonds in a way which is consistent with the type of N-body interaction being considered. The atoms (and bonds) in a candidate match are rearranged by the canonical_order(). Then the re-ordered list of atom and bond ids is tested against the list of atom/bond ids in the matches-found-so-far, before it is added. """ if report_progress: startatomid = 0 sys.stderr.write(' searching for matching bond patterns:\n') sys.stderr.write(' 0%') # Figure out which atoms from "G_system" bond together in a way which # matches the "g_bond_pattern" argument. Organize these matches by # atom and bond types and store all of the non-redundant ones in # the "interactions_by_type" variable. gm = GraphMatcher(G_system, g_bond_pattern) interactions_by_type = defaultdict(list) for atombondids in gm.Matches(): # "atombondids" is a tuple. # atombondids[0] has atomIDs from G_system corresponding to g_bond_pattern # (These atomID numbers are indices into the G_system.verts[] list.) # atombondids[1] has bondIDs from G_system corresponding to g_bond_pattern # (These bondID numbers are indices into the G_system.edges[] list.) # It's convenient to organize the list of interactions-between- # atoms in a dictionary indexed by atomtypes and bondtypes. # (Because many atoms and bonds typically share the same type, # organizing the results this way makes it faster to check # whether a given interaction matches a "typepattern" defined # by the user. We only have to check once for the whole group.) atombondtypes = \ (tuple([G_system.GetVert(Iv).attr for Iv in atombondids[0]]), tuple([G_system.GetEdge(Ie).attr for Ie in atombondids[1]])) interactions_by_type[atombondtypes].append(atombondids) if report_progress: # GraphMatcher.Matches() searches for matches in an order # that selects a different atomid number from G_system, # starting at 0, and continuing up to the number of atoms (-1) # in the system (G_system.nv-1), and using this as the first # atom in the match (ie match[0][0]). This number can be used # to guess much progress has been made so far. 
oldatomid = startatomid startatomid = atombondids[0][0] percent_complete = (100 * startatomid) // G_system.GetNumVerts() # report less often as more progress made if percent_complete <= 4: old_pc = (100 * oldatomid) // G_system.GetNumVerts() if percent_complete > old_pc: sys.stderr.write(' ' + str(percent_complete) + '%') elif percent_complete <= 8: pc_d2 = (100 * startatomid) // (2 * G_system.GetNumVerts()) oldpc_d2 = (100 * oldatomid) // (2 * G_system.GetNumVerts()) if pc_d2 > oldpc_d2: sys.stderr.write(' ' + str(percent_complete) + '%') elif percent_complete <= 20: pc_d4 = (100 * startatomid) // (4 * G_system.GetNumVerts()) oldpc_d4 = (100 * oldatomid) // (4 * G_system.GetNumVerts()) if pc_d4 > oldpc_d4: sys.stderr.write(' ' + str(percent_complete) + '%') else: pc_d10 = (100 * startatomid) // (10 * G_system.GetNumVerts()) oldpc_d10 = (100 * oldatomid) // (10 * G_system.GetNumVerts()) if pc_d10 > oldpc_d10: sys.stderr.write(' ' + str(percent_complete) + '%') if report_progress: sys.stderr.write(' 100%\n') #sys.stderr.write(' ...done\n') #sys.stderr.write(' Looking up available atom and bond types...') #coefftype_to_atomids = defaultdict(list) #abids_to_coefftypes = defaultdict(list) coefftype_to_atomids = OrderedDict() abids_to_coefftypes = OrderedDict() # -------------------- reporting progress ----------------------- if report_progress: # The next interval of code is not technically necessary, but it makes # the printed output easier to read by excluding irrelevant interactions # Now, test each match to see if the atoms and bonds involved match # any of the type-patterns in the "typepattern_to_coefftypes" argument. types_atoms_all_str = set([]) types_bonds_all_str = set([]) for typepattern, coefftype in typepattern_to_coefftypes: for atombondtypes, abidslist in interactions_by_type.items(): for Iv in atombondtypes[0]: types_atoms_all_str.add(atomtypes_int2str[Iv]) for Ie in atombondtypes[1]: types_bonds_all_str.add(bondtypes_int2str[Ie]) # ------------------ reporting progress (end) ------------------- # ------------------ check to make sure all interactions are defined ------ if check_undefined_atomids_str: # Checking for missing interactions is a headache. # Please excuse the messy code below. atomids_matched = OrderedDict() # Then loop through all the interactions (tuples of atoms) found by # GraphMatcher, sort the atoms and store them in dictionary # (atomids_matched) which keeps track of which interactions have # been defined (ie have force-field parameters assigned to them). # Initialize them to False, and update as interactions are found. for atombondtypes, abidslist in interactions_by_type.items(): for abids in abidslist: abids = canonical_order(abids) atomids_int = tuple(abids[0]) # NOTE TO SELF: # If in the future, different interactions (type_patterns) have # different symmetries, and canonical_order() varies from # interaction to interaction, then DONT loop over type_pattern: # for type_pattern, coefftype in typepattern_to_coefftypes) # abids = canonical_order(abids, type_pattern) # Why: When checking for undefined interactions, # we just want to make sure that SOME kind of interaction # involving these atoms exists. The gruesome details of # force-field symmetry should not enter into this. 
# (We certainly don't want to require that different # interactions are simultaneously present for the same set of # atoms for ALL the possible different atom orderings for the # different possible symmetries in the force field you are using # Perhaps, in the future I should just use something like this: # atomids_int = abids[0] # atomids_int.sort() # atomids_int = tuple(atomids_int) # This would work for most molecules. # I suppose that in some some bizarre molecules containing # triangular or square cycles, for example, this would not # distinguish all 3 angles in the triangle, for example. # mistakenly thinking there was only one interaction there. # But these cases are rare.) if not atomids_int in atomids_matched: atomids_matched[atomids_int] = False # (Later on, we'll set some of these to True) # ------------------ check to make sure all interactions are defined (end) count = 0 for typepattern, coefftype in typepattern_to_coefftypes: # ------------------ reporting progress ----------------------- # The next interval of code is not technically necessary, but it makes # the printed output easier to read by excluding irrelevant # interactions if report_progress: # Check to see if the atoms or bonds referred to in typepattern # are (potentially) satisfied by any of the atoms present in the system. # If any of the required atoms for this typepattern are not present # in this system, then skip to the next typepattern. atoms_available_Iv = [False for Iv in range( 0, g_bond_pattern.GetNumVerts())] for Iv in range(0, g_bond_pattern.GetNumVerts()): for type_atom_str in types_atoms_all_str: if MatchesPattern(type_atom_str, typepattern[Iv]): atoms_available_Iv[Iv] = True atoms_available = True for Iv in range(0, g_bond_pattern.GetNumVerts()): if not atoms_available_Iv[Iv]: atoms_available = False bonds_available_Ie = [False for Ie in range( 0, g_bond_pattern.GetNumEdges())] for Ie in range(0, g_bond_pattern.GetNumEdges()): for type_bond_str in types_bonds_all_str: if MatchesPattern(type_bond_str, typepattern[g_bond_pattern.GetNumVerts() + Ie]): bonds_available_Ie[Ie] = True bonds_available = True for Ie in range(0, g_bond_pattern.GetNumEdges()): if not bonds_available_Ie[Ie]: bonds_available = False if atoms_available and bonds_available: # Explanation: # (Again) only if ALL of the atoms and bond requirements for # this type pattern are satisfied by at least SOME of the atoms # present in the this system, ...THEN print a status message. # (Because for complex all-atom force-fields, the number of # possible atom types, and typepatterns far exceeds the number # of atom types typically present in the system. Otherwise # hundreds of kB of irrelevant information can be printed.) sys.stderr.write(' checking ' + coefftype + ' type requirements:' #' (atom-types,bond-types) ' '\n ' + str(typepattern) + '\n') # ------------------ reporting progress (end) ------------------- for atombondtypes, abidslist in interactions_by_type.items(): # express atom & bond types in a tuple of the original string # format types_atoms = [atomtypes_int2str[Iv] for Iv in atombondtypes[0]] types_bonds = [bondtypes_int2str[Ie] for Ie in atombondtypes[1]] type_strings = types_atoms + types_bonds # use string comparisons to check for a match with typepattern if MatchesAll(type_strings, typepattern): # <-see "ttree_lex.py" for abids in abidslist: # Re-order the atoms (and bonds) in a "canonical" way. # Only add new interactions to the list after re-ordering # them and checking that they have not been added earlier. 
# (...well not when using the same coefftype at least. # This prevents the same triplet of atoms from # being used to calculate the bond-angle twice: # once for 1-2-3 and 3-2-1, for example.) abids = canonical_order(abids) redundant = False if abids in abids_to_coefftypes: coefftypes = abids_to_coefftypes[abids] if coefftype in coefftypes: redundant = True if check_undefined_atomids_str: atomids_int = tuple(abids[0]) atomids_matched[atomids_int] = True if not redundant: # (It's too bad python does not # have an Ordered defaultdict) if coefftype in coefftype_to_atomids: coefftype_to_atomids[coefftype].append(abids[0]) else: coefftype_to_atomids[coefftype] = [abids[0]] if abids in abids_to_coefftypes: abids_to_coefftypes[abids].append(coefftype) else: abids_to_coefftypes[abids] = [coefftype] count += 1 if report_progress: sys.stderr.write(' (found ' + str(count) + ' non-redundant matches)\n') if check_undefined_atomids_str: for atomids_int, found_match in atomids_matched.items(): if not found_match: atomids_str = [check_undefined_atomids_str[Iv] for Iv in atomids_int] raise InputError('Error: A bonded interaction should exist between atoms:\n' + ' ' + (',\n '.join(atomids_str)) + '\n' + ' ...however no interaction between these types of atoms has been defined\n' + ' This usually means that at least one of your atom TYPES is incorrect.\n' + ' If this is not the case, then you can override this error message by\n' + ' invoking moltemplate.sh without the \"-checkff\" argument.\n') return coefftype_to_atomids def GenInteractions_str(bond_pairs, g_bond_pattern, typepattern_to_coefftypes, canonical_order, # function to sort atoms and bonds atomids_str, atomtypes_str, bondids_str, bondtypes_str, report_progress=False, # print messages to sys.stderr? check_undefined=False): assert(len(atomids_str) == len(atomtypes_str)) assert(len(bondids_str) == len(bondtypes_str)) # The atomids and atomtypes and bondtypes are strings. # First we assign a unique integer id to each string. 
atomids_str2int = {} atomtypes_str2int = {} atomtypes_int2str = [] atomtype_int = 0 for i in range(0, len(atomids_str)): if atomids_str[i] in atomids_str2int: raise InputError('Error: multiple atoms have the same id (' + str(atomids_str[i]) + ')') atomids_str2int[atomids_str[i]] = i #atomtypes_int = len(atomtypes_int)+1 if (not (atomtypes_str[i] in atomtypes_str2int)): atomtypes_str2int[atomtypes_str[i]] = atomtype_int atomtypes_int2str.append(atomtypes_str[i]) atomtype_int += 1 # atomtypes_int.append(atomtype_int) bondids_str2int = {} bondtypes_str2int = {} bondtypes_int2str = [] bondtype_int = 0 for i in range(0, len(bondids_str)): if bondids_str[i] in bondids_str2int: raise InputError('Error: multiple bonds have the same id (' + str(bondids_str[i]) + ')') bondids_str2int[bondids_str[i]] = i #bondtype_int = len(bondtypes_int)+1 if (not (bondtypes_str[i] in bondtypes_str2int)): bondtypes_str2int[bondtypes_str[i]] = bondtype_int bondtypes_int2str.append(bondtypes_str[i]) bondtype_int += 1 # Now convert "bond_pairs" into the UGraph format G_system = Ugraph() for iv in range(0, len(atomtypes_str)): G_system.AddVertex(iv, atomtypes_str2int[atomtypes_str[iv]]) for ie in range(0, len(bond_pairs)): atomid1_str = bond_pairs[ie][0] atomid2_str = bond_pairs[ie][1] if (atomid1_str not in atomids_str2int): raise InputError('Error in Bonds Section:\n' ' ' + atomid1_str + ' is not defined in Atoms section\n') if (atomid2_str not in atomids_str2int): raise InputError('Error in Bonds Section:\n' ' ' + atomid2_str + ' is not defined in Atoms section\n') G_system.AddEdge(atomids_str2int[atomid1_str], atomids_str2int[atomid2_str], bondtypes_str2int[bondtypes_str[ie]]) coefftype_to_atomids_int = GenInteractions_int(G_system, g_bond_pattern, typepattern_to_coefftypes, canonical_order, atomtypes_int2str, bondtypes_int2str, report_progress, (atomids_str if check_undefined else None)) coefftype_to_atomids_str = OrderedDict() for coefftype, atomidss_int in coefftype_to_atomids_int.items(): if report_progress: sys.stderr.write(' processing coefftype: ' + str(coefftype) + '\n') for atomids_int in atomidss_int: if coefftype in coefftype_to_atomids_str: coefftype_to_atomids_str[coefftype].append( [atomids_str[iv] for iv in atomids_int]) else: coefftype_to_atomids_str[coefftype] = \ [[atomids_str[iv] for iv in atomids_int]] # gc.collect() return coefftype_to_atomids_str
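# A hypothetical, commented-out sketch of a 3-body (angle) search using only the entry points
# defined above; the atom/bond ids, types, patterns and canonical_order below are illustrative
# assumptions, not data from any real force field:
#
# g_angle = Ugraph()                      # bond pattern: three atoms bonded 0-1, 1-2
# for iv in range(3):
#     g_angle.AddVertex(iv, 0)
# g_angle.AddEdge(0, 1, 0)
# g_angle.AddEdge(1, 2, 0)
#
# def order_angle(abids):                 # treat 1-2-3 and 3-2-1 as the same angle
#     atomids, bondids = abids
#     if atomids[0] > atomids[-1]:
#         return (tuple(reversed(atomids)), tuple(reversed(bondids)))
#     return (tuple(atomids), tuple(bondids))
#
# angles = GenInteractions_str(bond_pairs=[('1', '2'), ('2', '3')],
#                              g_bond_pattern=g_angle,
#                              typepattern_to_coefftypes=[(('*', '*', '*', '*', '*'),
#                                                          '@angle:generic')],
#                              canonical_order=order_angle,
#                              atomids_str=['1', '2', '3'],
#                              atomtypes_str=['CA', 'CB', 'CA'],
#                              bondids_str=['b1', 'b2'],
#                              bondtypes_str=['1', '1'])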
gpl-2.0
596,104,645,854,326,400
47.83547
117
0.572916
false
sigma-random/avmplus
build/buildbot/master/custom/buildbot_ext/steps/shellAddons.py
8
16201
# -*- test-case-name: buildbot.test.test_steps,buildbot.test.test_properties -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from buildbot.steps.shell import ShellCommand from buildbot.steps.transfer import FileDownload from buildbot.status.builder import SUCCESS, FAILURE, SKIPPED, WARNINGS from buildbot.process.buildstep import LoggingBuildStep, RemoteShellCommand import buildbot.status.builder from twisted.python import log import re class BaseShellCommand(ShellCommand): messages=[] script_status="" def __init__(self, **kwargs): ShellCommand.__init__(self, **kwargs) self.messages=[] self.script_status="" def start(self): res="SUCCESS" for result in self.build.results: if result == WARNINGS and res == "SUCCESS": res = "WARNINGS" if result == FAILURE: res = "FAILURE" self.build.setProperty("status", res, "BaseShellCommand") ShellCommand.start(self) def createSummary(self, log): lines=log.readlines() for line in lines: if line.startswith('url:'): items=line.split() url=items[1] items=items[2:] desc='' for item in items: desc="%s %s" % (desc,item) self.addURL(desc, url) if line.startswith("message:"): message = line[len("message:"):].strip() self.messages.append(message) if line.startswith("buildbot_status:"): # valid values: [ SUCCESS | FAILURE | WARNINGS ] self.script_status = line[len("buildbot_status:"):].strip() def getText(self, cmd, results): text=ShellCommand.getText(self,cmd,results) for message in self.messages: text.append('%s' % message) return text def evaluateCommand(self,cmd): if cmd.rc != 0: return buildbot.status.builder.FAILURE if self.script_status != "": if self.script_status == "SUCCESS": return buildbot.status.builder.SUCCESS elif self.script_status == "FAILURE": return buildbot.status.builder.FAILURE elif self.script_status == "WARNINGS": return buildbot.status.builder.WARNINGS else: # This is an unknown status, FAIL the step so that it is investigated return buildbot.status.builder.FAILURE return buildbot.status.builder.SUCCESS class PerformanceShellCommand(ShellCommand): versions=[] numtests=0 def createSummary(self,log1): self.versions=[] self.numtests=0 lines=log1.readlines() for line in lines: if line.startswith('installing'): version=line.split()[1] self.versions.append(version) try: line.index('running test') self.numtests += 1 except: self.numtests+=0 self.versions.sort() def getText(self,cmd,results): text=ShellCommand.getText(self,cmd,results) for version in self.versions: text.append('tested player %s' % version) text.append(' ran %d tests' % self.numtests) return text class SizeReportShellCommand(BaseShellCommand): sizeBytes = '-' sizeKBytes = '-' sizeExtra = '-' def createSummary(self,log1): BaseShellCommand.createSummary(self,log1) lines=log1.readlines() for line in lines: if line.startswith('size_bytes'): self.sizeBytes=line.split()[1] if line.startswith('size_K')>0: self.sizeKBytes=line.split()[1] if line.startswith('size_extra')>0: self.sizeExtra=line.split()[1] def getText(self, cmd, results): text=BaseShellCommand.getText(self,cmd,results) sz="%.2f" % (float(self.sizeKBytes)-1+float(self.sizeExtra)/1024.0) text.append("%s K %d bytes" % (sz,int(self.sizeBytes))) return text class BuildShellCommand(BaseShellCommand): def createSummary(self,log1): BaseShellCommand.createSummary(self, log1) class PerfShellCommand(BaseShellCommand): perfchange = '-' def createSummary(self,log1): 
BaseShellCommand.createSummary(self, log1) lines=log1.readlines() for line in lines: if line.startswith('perfchange:'): items=line.split() try: self.perfchange=float(items[1][:-1]) except: self.perfchange="error" def getText(self, cmd, results): text=BaseShellCommand.getText(self,cmd,results) text.append("Performance change: %s%%\n" % self.perfchange) return text def evaluateCommand(self,cmd): if cmd.rc != 0: return buildbot.status.builder.FAILURE if self.perfchange > -3: return buildbot.status.builder.SUCCESS elif self.perfchange > -10: return buildbot.status.builder.WARNINGS else: return buildbot.status.builder.FAILURE class PerfNoColorShellCommand(BaseShellCommand): perfchange = '-' def createSummary(self,log1): BaseShellCommand.createSummary(self, log1) lines=log1.readlines() for line in lines: if line.startswith('perfchange:'): items=line.split() try: self.perfchange=float(items[1][:-1]) except: self.perfchange="error" def getText(self, cmd, results): text=BaseShellCommand.getText(self,cmd,results) text.append("Performance change: %s%%\n" % self.perfchange) return text class TestSuiteShellCommand(BaseShellCommand): passes = 0 fails = 0 unexpectedpasses = 0 expectedfails = 0 skipped = 0 asserts = 0 def createSummary(self,log1): BaseShellCommand.createSummary(self,log1) lines=log1.readlines() for line in lines: if line.find('total passes')>-1: fields=line.split() self.passes=int(fields[4]) self.unexpectedpasses=int(fields[7]) if line.find('total failures')>-1: fields=line.split() self.fails=int(fields[4]) self.expectedfails=int(fields[7]) if line.startswith('unexpected'): fields=line.split() try: self.unexpectedpasses=int(fields[3]) except: print("Error parsing unexpected passes") if line.startswith('expected'): fields=line.split() try: self.expectedfails=int(fields[3]) except: print("Error parsing expected failures") if line.startswith('failures'): fields=line.split() self.fails=int(fields[2]) if line.startswith('passes'): fields=line.split() self.passes=int(fields[2]) if line.startswith('tests skipped'): fields=line.split() self.skipped=int(fields[3]) if line.startswith('assertions'): fields=line.split() self.asserts=int(fields[2]) def evaluateCommand(self,cmd): if cmd.rc != 0: return buildbot.status.builder.FAILURE if self.fails>0 or self.unexpectedpasses>0: return buildbot.status.builder.FAILURE if self.passes==0 and self.fails==0 and self.unexpectedpasses==0: return buildbot.status.builder.FAILURE # Before we say it was a success, check to see if there were assertions. # This will only get checked if the above have already passed, this is # the last check prior to passing the step. if self.asserts>0: # Treat assertions as a warning return buildbot.status.builder.FAILURE else: return buildbot.status.builder.SUCCESS def getText(self, cmd, results): text=BaseShellCommand.getText(self,cmd,results) text.append("test results") text.append("passes:%d </br>" % self.passes) text.append("failures:%d</br>" % self.fails) text.append("skipped:%d</br>" % self.skipped) text.append("unexp pass:%d</br>" % self.unexpectedpasses) text.append("exp fails:%d</br>" % self.expectedfails) text.append("assertions:%d</br>" % self.asserts) return text class BuildShellCheckCommand(BaseShellCommand): # Use this if you wish to stop the build entirely on failure haltOnFailure = True def createSummary(self,log1): BaseShellCommand.createSummary(self, log1) def parseSendchangeArguments(args): """This function parses the arguments that the Buildbot patch uploader sends to Buildbot via the "changed files". 
It takes an argument of a list of files and returns a dictionary with key/value pairs """ parsedArgs = {} for arg in args: try: (key, value) = arg.split(":", 1) value = value.lstrip().rstrip() parsedArgs[key] = value except: pass return parsedArgs class BuildRequestDownload(FileDownload): """This step reads a Change for a filename and downloads it to the slave. """ haltOnFailure = True def __init__(self, isOptional=False, patchDir=".", **kwargs): """arguments: @type patchDir: string @param patchDir: The directory on the master that holds the patches This directory is relative to the base buildmaster directory. ie. /home/buildmaster/project Defaults to '.' 'workdir' is assumed to be 'build' and should be passed if it is anything else. 'isOptional' is assumed to be False; if the patch is optional, pass True. """ self.patchDir = patchDir self.isOptional = isOptional # mastersrc and slavedest get overridden in start() if not 'workdir' in kwargs: kwargs['workdir'] = "build" FileDownload.__init__(self, mastersrc=".", slavedest=".", **kwargs) def start(self): changes = self.step_status.build.getChanges() if len(changes) < 1: return SKIPPED args = parseSendchangeArguments(changes[0].files) if not 'infoFile' in args and self.isOptional: return SKIPPED self.mastersrc = "%s/%s" % (self.patchDir, args['infoFile']) self.slavedest = "%s" % (args['infoFile']) # now that everything is set-up, download the file FileDownload.start(self) class ShellCommandToken(BaseShellCommand): commandOriginal = [] def __init__(self, isOptional=False, patchDir=".", **kwargs): """arguments: @type patchDir: string @param patchDir: The directory on the master that holds the patches This directory is relative to the base buildmaster directory. ie. /home/buildslave/project Defaults to '.' 'workdir' is assumed to be 'build' and should be passed if it is anything else. 'isOptional' is assumed to be False; if the patch is optional, pass True. 
""" self.patchDir = patchDir self.isOptional = isOptional self.commandOriginal = [] for item in kwargs['command']: self.commandOriginal.append(item) if not 'workdir' in kwargs: kwargs['workdir'] = "build" BaseShellCommand.__init__(self, **kwargs) def start(self): changes = self.step_status.build.getChanges() if len(changes) < 1: return SKIPPED args = parseSendchangeArguments(changes[0].files) if not 'infoFile' in args and self.isOptional: return SKIPPED #log.msg("command CLEAN [BEFORE]: %s" % self.command) #log.msg("commandOriginal: %s" % self.commandOriginal) self.command = [] for item in self.commandOriginal: self.command.append(item) #log.msg("command CLEAN [AFTER]: %s" % self.command) #log.msg("command [BEFORE]: %s" % self.command) f = open(self.patchDir +"/" + args['infoFile']) for line in f.readlines(): if line.startswith("repoPath"): repoPath = line.split()[1] if line.startswith("revision"): revision = line.split()[1] if line.startswith("branch:"): branch = line.split()[1] builderName = self.step_status.build.getBuilder().getName() log.msg("ShellCommandToken -> repoPath: %s" % repoPath) log.msg("ShellCommandToken -> revision: %s" % revision) log.msg("ShellCommandToken -> branch: %s" % branch) log.msg("ShellCommandToken -> builderName: %s" % builderName) for index, item in enumerate(self.command): self.command[index] = self.command[index].replace("$repoPath$", repoPath) self.command[index] = self.command[index].replace("$revision$", revision) self.command[index] = self.command[index].replace("$branch$", branch) self.command[index] = self.command[index].replace("$builderName$", builderName) #log.msg("command [AFTER]: %s" % self.command) #log.msg("workdir [BEFORE]: %s" % self.remote_kwargs['workdir']) self.remote_kwargs['workdir'] = self.remote_kwargs['workdir'].replace("$branch$", branch) #log.msg("workdir [AFTER]: %s" % self.remote_kwargs['workdir']) BaseShellCommand.start(self) class SandboxClone(BaseShellCommand): changeDir = "" dest = "" def __init__(self, dest=".", changeDir=".", **kwargs): """arguments: @type changeDir: string @param changeDir: The directory on the master that holds the processed change requests. This directory is relative to the base buildmaster directory. Defaults to 'changes/processed' """ self.changeDir = changeDir self.dest = dest BaseShellCommand.__init__(self, **kwargs) # need to explicitly tell add our custom arguments to the factory self.addFactoryArguments(changeDir=changeDir, dest=dest) def start(self): changes = self.step_status.build.getChanges() # I think that was only here as a safety check since it used to only # be used for sandbox builds which were supposed to only have a single change #if len(changes) < 1: # return SKIPPED # The list of files changed for this build also contains an additional # entry the is the name of the build trigger file, we need to find that # file so that we can pass the build information along the build process for changefile in changes[0].files: if changefile.startswith("change-"): f = open(self.changeDir +"/" + changefile) for line in f.readlines(): if line.startswith("url:"): hg_url = line[line.find(":")+1:].strip() break self.command = [] self.command.append("hg") self.command.append("clone") self.command.append(hg_url) self.command.append(self.dest) break BaseShellCommand.start(self)
mpl-2.0
-1,644,274,203,162,559,000
34.922395
97
0.575397
false
SUSE-Cloud/nova
nova/tests/api/openstack/compute/plugins/v3/test_server_diagnostics.py
8
3322
# Copyright 2011 Eldar Nugaev # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from nova.api.openstack import compute from nova.api.openstack.compute.plugins.v3 import server_diagnostics from nova.api.openstack import wsgi from nova.compute import api as compute_api from nova import exception from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes UUID = 'abc' def fake_get_diagnostics(self, _context, instance_uuid): return {'data': 'Some diagnostic info'} def fake_instance_get(self, _context, instance_uuid): if instance_uuid != UUID: raise Exception("Invalid UUID") return {'uuid': instance_uuid} def fake_instance_get_instance_not_found(self, _context, instance_uuid): raise exception.InstanceNotFound(instance_id=instance_uuid) class ServerDiagnosticsTest(test.NoDBTestCase): def setUp(self): super(ServerDiagnosticsTest, self).setUp() self.stubs.Set(compute_api.API, 'get_diagnostics', fake_get_diagnostics) self.stubs.Set(compute_api.API, 'get', fake_instance_get) self.router = compute.APIRouterV3(init_only=('servers', 'os-server-diagnostics')) def test_get_diagnostics(self): req = fakes.HTTPRequestV3.blank( '/servers/%s/os-server-diagnostics' % UUID) res = req.get_response(self.router) output = jsonutils.loads(res.body) self.assertEqual(output, {'data': 'Some diagnostic info'}) def test_get_diagnostics_with_non_existed_instance(self): req = fakes.HTTPRequestV3.blank( '/servers/%s/os-server-diagnostics' % UUID) self.stubs.Set(compute_api.API, 'get', fake_instance_get_instance_not_found) res = req.get_response(self.router) self.assertEqual(res.status_int, 404) class TestServerDiagnosticsXMLSerializer(test.NoDBTestCase): namespace = wsgi.XMLNS_V11 def _tag(self, elem): tagname = elem.tag self.assertEqual(tagname[0], '{') tmp = tagname.partition('}') namespace = tmp[0][1:] self.assertEqual(namespace, self.namespace) return tmp[2] def test_index_serializer(self): serializer = server_diagnostics.ServerDiagnosticsTemplate() exemplar = dict(diag1='foo', diag2='bar') text = serializer.serialize(exemplar) tree = etree.fromstring(text) self.assertEqual('diagnostics', self._tag(tree)) self.assertEqual(len(tree), len(exemplar)) for child in tree: tag = self._tag(child) self.assertTrue(tag in exemplar) self.assertEqual(child.text, exemplar[tag])
apache-2.0
-7,391,060,641,811,129,000
33.968421
78
0.668573
false
grengojbo/st2
st2client/st2client/utils/terminal.py
10
2030
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import struct import subprocess __all__ = [ 'get_terminal_size' ] def get_terminal_size(default=(80, 20)): """ :return: (lines, cols) """ def ioctl_GWINSZ(fd): import fcntl import termios return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) # try stdin, stdout, stderr for fd in (0, 1, 2): try: return ioctl_GWINSZ(fd) except: pass # try os.ctermid() try: fd = os.open(os.ctermid(), os.O_RDONLY) try: return ioctl_GWINSZ(fd) finally: os.close(fd) except: pass # try `stty size` try: process = subprocess.Popen(['stty', 'size'], shell=False, stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')) result = process.communicate() if process.returncode == 0: return tuple(int(x) for x in result[0].split()) except: pass # try environment variables try: return tuple(int(os.getenv(var)) for var in ('LINES', 'COLUMNS')) except: pass # return default. return default
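# A minimal usage sketch (illustrative; relies only on the public helper defined above):
if __name__ == '__main__':
    print('detected terminal size (lines, cols): %s' % (get_terminal_size(),))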
apache-2.0
314,517,585,102,528,500
30.230769
79
0.607389
false
merll/docker-fabric
dockerfabric/socat.py
1
2300
# -*- coding: utf-8 -*- from __future__ import unicode_literals from fabric.state import env from fabric.utils import puts from .base import ConnectionDict, get_local_port from .tunnel import LocalTunnel class SocketTunnels(ConnectionDict): """ Cache for **socat** tunnels to the remote machine. Instantiation of :class:`SocketTunnel` can be configured with ``env.socat_quiet``, setting the ``quiet`` keyword argument. """ def __getitem__(self, item): """ :param item: Tuple of remote socket name, remote port, and local port number. :type item: tuple :return: Socket tunnel :rtype: SocketTunnel """ def _connect_socket_tunnel(): local_port = get_local_port(init_local_port) svc = SocketTunnel(remote_socket, local_port, env.get('socat_quiet', True)) svc.connect() return svc remote_socket, init_local_port = item key = env.host_string, remote_socket return self.get_or_create_connection(key, _connect_socket_tunnel) socat_tunnels = SocketTunnels() class SocketTunnel(LocalTunnel): """ Establish a tunnel from the local machine to the SSH host and from there start a **socat** process for forwarding traffic between the remote-end `stdout` and a Unix socket. :param remote_socket: Unix socket to connect to on the remote machine. :type remote_socket: unicode :param local_port: Local TCP port to use for the tunnel. :type local_port: int :param quiet: If set to ``False``, the **socat** command line on the SSH channel will be written to `stdout`. :type quiet: bool """ def __init__(self, remote_socket, local_port, quiet=True): dest = 'STDIO' src = 'UNIX-CONNECT:{0}'.format(remote_socket) self.quiet = quiet self._socat_cmd = ' '.join(('socat', dest, src)) super(SocketTunnel, self).__init__(local_port) def get_channel(self, transport, remote_addr, local_peer): channel = transport.open_channel('session') if channel is None: raise Exception("Failed to open channel on the SSH server.") if not self.quiet: puts(self._socat_cmd) channel.exec_command(self._socat_cmd) return channel
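# A minimal usage sketch (commented out; the socket path and local port are assumptions used
# only for illustration -- they are not defaults of this module):
#
#   tunnel = socat_tunnels[('/var/run/docker.sock', 2375)]
#   # subsequent Fabric tasks on the same host can then reach the Unix socket through the
#   # forwarded local TCP port of the returned SocketTunnel.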
mit
-4,617,141,349,413,265,000
34.384615
117
0.642609
false
BartVB/ansible-modules-core
cloud/openstack/_quantum_subnet.py
129
10130
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <[email protected]> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: try: from neutronclient.neutron import client except ImportError: from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient HAVE_DEPS = True except ImportError: HAVE_DEPS = False DOCUMENTATION = ''' --- module: quantum_subnet deprecated: Deprecated in 2.0. Use os_subnet instead version_added: "1.2" short_description: Add/remove subnet from a network description: - Add/remove subnet from a network options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - Password of login user required: true default: True login_tenant_name: description: - The tenant name of the login user required: true default: True auth_url: description: - The keystone URL for authentication required: false default: 'http://127.0.0.1:35357/v2.0/' region_name: description: - Name of the region required: false default: None state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present network_name: description: - Name of the network to which the subnet should be attached required: true default: None name: description: - The name of the subnet that should be created required: true default: None cidr: description: - The CIDR representation of the subnet that should be assigned to the subnet required: true default: None tenant_name: description: - The name of the tenant for whom the subnet should be created required: false default: None ip_version: description: - The IP version of the subnet 4 or 6 required: false default: 4 enable_dhcp: description: - Whether DHCP should be enabled for this subnet. 
required: false default: true gateway_ip: description: - The ip that would be assigned to the gateway for this subnet required: false default: None dns_nameservers: description: - DNS nameservers for this subnet, comma-separated required: false default: None version_added: "1.4" allocation_pool_start: description: - From the subnet pool the starting address from which the IP should be allocated required: false default: None allocation_pool_end: description: - From the subnet pool the last IP that should be assigned to the virtual machines required: false default: None requirements: - "python >= 2.6" - "python-neutronclient or python-quantumclient" - "python-keystoneclient" ''' EXAMPLES = ''' # Create a subnet for a tenant with the specified subnet - quantum_subnet: state=present login_username=admin login_password=admin login_tenant_name=admin tenant_name=tenant1 network_name=network1 name=net1subnet cidr=192.168.0.0/24" ''' _os_keystone = None _os_tenant_id = None _os_network_id = None def _get_ksclient(module, kwargs): try: kclient = ksclient.Client(username=kwargs.get('login_username'), password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) except Exception, e: module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) global _os_keystone _os_keystone = kclient return kclient def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception, e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) kwargs = { 'token': token, 'endpoint_url': endpoint } try: neutron = client.Client('2.0', **kwargs) except Exception, e: module.fail_json(msg = " Error in connecting to neutron: %s" % e.message) return neutron def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: tenant_name = module.params['login_tenant_name'] else: tenant_name = module.params['tenant_name'] for tenant in _os_keystone.tenants.list(): if tenant.name == tenant_name: _os_tenant_id = tenant.id break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_net_id(neutron, module): kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['network_name'], } try: networks = neutron.list_networks(**kwargs) except Exception, e: module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None return networks['networks'][0]['id'] def _get_subnet_id(module, neutron): global _os_network_id subnet_id = None _os_network_id = _get_net_id(neutron, module) if not _os_network_id: module.fail_json(msg = "network id of network not found.") else: kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['name'], } try: subnets = neutron.list_subnets(**kwargs) except Exception, e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None return subnets['subnets'][0]['id'] def _create_subnet(module, neutron): neutron.format = 'json' subnet = { 'name': module.params['name'], 'ip_version': module.params['ip_version'], 'enable_dhcp': module.params['enable_dhcp'], 'tenant_id': _os_tenant_id, 'gateway_ip': module.params['gateway_ip'], 'dns_nameservers': module.params['dns_nameservers'], 'network_id': 
_os_network_id, 'cidr': module.params['cidr'], } if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: allocation_pools = [ { 'start' : module.params['allocation_pool_start'], 'end' : module.params['allocation_pool_end'] } ] subnet.update({'allocation_pools': allocation_pools}) if not module.params['gateway_ip']: subnet.pop('gateway_ip') if module.params['dns_nameservers']: subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') else: subnet.pop('dns_nameservers') try: new_subnet = neutron.create_subnet(dict(subnet=subnet)) except Exception, e: module.fail_json(msg = "Failure in creating subnet: %s" % e.message) return new_subnet['subnet']['id'] def _delete_subnet(module, neutron, subnet_id): try: neutron.delete_subnet(subnet_id) except Exception, e: module.fail_json( msg = "Error in deleting subnet: %s" % e.message) return True def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( name = dict(required=True), network_name = dict(required=True), cidr = dict(required=True), tenant_name = dict(default=None), state = dict(default='present', choices=['absent', 'present']), ip_version = dict(default='4', choices=['4', '6']), enable_dhcp = dict(default='true', type='bool'), gateway_ip = dict(default=None), dns_nameservers = dict(default=None), allocation_pool_start = dict(default=None), allocation_pool_end = dict(default=None), )) module = AnsibleModule(argument_spec=argument_spec) if not HAVE_DEPS: module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required') neutron = _get_neutron_client(module, module.params) _set_tenant_id(module) if module.params['state'] == 'present': subnet_id = _get_subnet_id(module, neutron) if not subnet_id: subnet_id = _create_subnet(module, neutron) module.exit_json(changed = True, result = "Created" , id = subnet_id) else: module.exit_json(changed = False, result = "success" , id = subnet_id) else: subnet_id = _get_subnet_id(module, neutron) if not subnet_id: module.exit_json(changed = False, result = "success") else: _delete_subnet(module, neutron, subnet_id) module.exit_json(changed = True, result = "deleted") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
-4,502,352,674,164,940,000
32.766667
122
0.611945
false
tomchristie/django
tests/shortcuts/tests.py
109
1737
from django.test import SimpleTestCase, override_settings from django.test.utils import require_jinja2 @override_settings(ROOT_URLCONF='shortcuts.urls') class RenderTests(SimpleTestCase): def test_render(self): response = self.client.get('/render/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'FOO.BAR../render/\n') self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8') self.assertFalse(hasattr(response.context.request, 'current_app')) def test_render_with_multiple_templates(self): response = self.client.get('/render/multiple_templates/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'FOO.BAR../render/multiple_templates/\n') def test_render_with_content_type(self): response = self.client.get('/render/content_type/') self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b'FOO.BAR../render/content_type/\n') self.assertEqual(response['Content-Type'], 'application/x-rendertest') def test_render_with_status(self): response = self.client.get('/render/status/') self.assertEqual(response.status_code, 403) self.assertEqual(response.content, b'FOO.BAR../render/status/\n') @require_jinja2 def test_render_with_using(self): response = self.client.get('/render/using/') self.assertEqual(response.content, b'DTL\n') response = self.client.get('/render/using/?using=django') self.assertEqual(response.content, b'DTL\n') response = self.client.get('/render/using/?using=jinja2') self.assertEqual(response.content, b'Jinja2\n')
bsd-3-clause
3,974,838,308,224,309,000
44.710526
85
0.688543
false
aurofable/medhack-server
venv/lib/python2.7/site-packages/sqlalchemy/orm/interfaces.py
17
26335
# orm/interfaces.py # Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Contains various base classes used throughout the ORM. Defines the now deprecated ORM extension classes as well as ORM internals. Other than the deprecated extensions, this module and the classes within should be considered mostly private. """ from itertools import chain from sqlalchemy import exc as sa_exc from sqlalchemy import util from sqlalchemy.sql import operators deque = __import__('collections').deque mapperutil = util.importlater('sqlalchemy.orm', 'util') collections = None __all__ = ( 'AttributeExtension', 'EXT_CONTINUE', 'EXT_STOP', 'ExtensionOption', 'InstrumentationManager', 'LoaderStrategy', 'MapperExtension', 'MapperOption', 'MapperProperty', 'PropComparator', 'PropertyOption', 'SessionExtension', 'StrategizedOption', 'StrategizedProperty', 'build_path', ) EXT_CONTINUE = util.symbol('EXT_CONTINUE') EXT_STOP = util.symbol('EXT_STOP') ONETOMANY = util.symbol('ONETOMANY') MANYTOONE = util.symbol('MANYTOONE') MANYTOMANY = util.symbol('MANYTOMANY') from deprecated_interfaces import AttributeExtension, SessionExtension, \ MapperExtension class MapperProperty(object): """Manage the relationship of a ``Mapper`` to a single class attribute, as well as that attribute as it appears on individual instances of the class, including attribute instrumentation, attribute access, loading behavior, and dependency calculations. The most common occurrences of :class:`.MapperProperty` are the mapped :class:`.Column`, which is represented in a mapping as an instance of :class:`.ColumnProperty`, and a reference to another class produced by :func:`.relationship`, represented in the mapping as an instance of :class:`.RelationshipProperty`. """ cascade = () """The set of 'cascade' attribute names. This collection is checked before the 'cascade_iterator' method is called. """ def setup(self, context, entity, path, reduced_path, adapter, **kwargs): """Called by Query for the purposes of constructing a SQL statement. Each MapperProperty associated with the target mapper processes the statement referenced by the query context, adding columns and/or criterion as appropriate. """ pass def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): """Return a 3-tuple consisting of three row processing functions. """ return None, None, None def cascade_iterator(self, type_, state, visited_instances=None, halt_on=None): """Iterate through instances related to the given instance for a particular 'cascade', starting with this MapperProperty. Return an iterator3-tuples (instance, mapper, state). Note that the 'cascade' collection on this MapperProperty is checked first for the given type before cascade_iterator is called. See PropertyLoader for the related instance implementation. """ return iter(()) def set_parent(self, parent, init): self.parent = parent def instrument_class(self, mapper): raise NotImplementedError() _compile_started = False _compile_finished = False def init(self): """Called after all mappers are created to assemble relationships between mappers and perform other post-mapper-creation initialization steps. 
""" self._compile_started = True self.do_init() self._compile_finished = True @property def class_attribute(self): """Return the class-bound descriptor corresponding to this MapperProperty.""" return getattr(self.parent.class_, self.key) def do_init(self): """Perform subclass-specific initialization post-mapper-creation steps. This is a template method called by the ``MapperProperty`` object's init() method. """ pass def post_instrument_class(self, mapper): """Perform instrumentation adjustments that need to occur after init() has completed. """ pass def per_property_preprocessors(self, uow): pass def is_primary(self): """Return True if this ``MapperProperty``'s mapper is the primary mapper for its class. This flag is used to indicate that the ``MapperProperty`` can define attribute instrumentation for the class at the class level (as opposed to the individual instance level). """ return not self.parent.non_primary def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): """Merge the attribute represented by this ``MapperProperty`` from source to destination object""" pass def compare(self, operator, value, **kw): """Return a compare operation for the columns represented by this ``MapperProperty`` to the given value, which may be a column value or an instance. 'operator' is an operator from the operators module, or from sql.Comparator. By default uses the PropComparator attached to this MapperProperty under the attribute name "comparator". """ return operator(self.comparator, value) class PropComparator(operators.ColumnOperators): """Defines comparison operations for MapperProperty objects. User-defined subclasses of :class:`.PropComparator` may be created. The built-in Python comparison and math operator methods, such as ``__eq__()``, ``__lt__()``, ``__add__()``, can be overridden to provide new operator behavior. The custom :class:`.PropComparator` is passed to the mapper property via the ``comparator_factory`` argument. In each case, the appropriate subclass of :class:`.PropComparator` should be used:: from sqlalchemy.orm.properties import \\ ColumnProperty,\\ CompositeProperty,\\ RelationshipProperty class MyColumnComparator(ColumnProperty.Comparator): pass class MyCompositeComparator(CompositeProperty.Comparator): pass class MyRelationshipComparator(RelationshipProperty.Comparator): pass """ def __init__(self, prop, mapper, adapter=None): self.prop = self.property = prop self.mapper = mapper self.adapter = adapter def __clause_element__(self): raise NotImplementedError("%r" % self) def adapted(self, adapter): """Return a copy of this PropComparator which will use the given adaption function on the local side of generated expressions. """ return self.__class__(self.prop, self.mapper, adapter) @staticmethod def any_op(a, b, **kwargs): return a.any(b, **kwargs) @staticmethod def has_op(a, b, **kwargs): return a.has(b, **kwargs) @staticmethod def of_type_op(a, class_): return a.of_type(class_) def of_type(self, class_): """Redefine this object in terms of a polymorphic subclass. Returns a new PropComparator from which further criterion can be evaluated. e.g.:: query.join(Company.employees.of_type(Engineer)).\\ filter(Engineer.name=='foo') :param \class_: a class or mapper indicating that criterion will be against this specific subclass. """ return self.operate(PropComparator.of_type_op, class_) def any(self, criterion=None, **kwargs): """Return true if this collection contains any member that meets the given criterion. 
The usual implementation of ``any()`` is :meth:`.RelationshipProperty.Comparator.any`. :param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.any_op, criterion, **kwargs) def has(self, criterion=None, **kwargs): """Return true if this element references a member which meets the given criterion. The usual implementation of ``has()`` is :meth:`.RelationshipProperty.Comparator.has`. :param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.has_op, criterion, **kwargs) class StrategizedProperty(MapperProperty): """A MapperProperty which uses selectable strategies to affect loading behavior. There is a single strategy selected by default. Alternate strategies can be selected at Query time through the usage of ``StrategizedOption`` objects via the Query.options() method. """ strategy_wildcard_key = None def _get_context_strategy(self, context, reduced_path): key = ('loaderstrategy', reduced_path) cls = None if key in context.attributes: cls = context.attributes[key] elif self.strategy_wildcard_key: key = ('loaderstrategy', (self.strategy_wildcard_key,)) if key in context.attributes: cls = context.attributes[key] if cls: try: return self._strategies[cls] except KeyError: return self.__init_strategy(cls) return self.strategy def _get_strategy(self, cls): try: return self._strategies[cls] except KeyError: return self.__init_strategy(cls) def __init_strategy(self, cls): self._strategies[cls] = strategy = cls(self) return strategy def setup(self, context, entity, path, reduced_path, adapter, **kwargs): self._get_context_strategy(context, reduced_path + (self.key,)).\ setup_query(context, entity, path, reduced_path, adapter, **kwargs) def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): return self._get_context_strategy(context, reduced_path + (self.key,)).\ create_row_processor(context, path, reduced_path, mapper, row, adapter) def do_init(self): self._strategies = {} self.strategy = self.__init_strategy(self.strategy_class) def post_instrument_class(self, mapper): if self.is_primary() and \ not mapper.class_manager._attr_has_impl(self.key): self.strategy.init_class_attribute(mapper) def build_path(entity, key, prev=None): if prev: return prev + (entity, key) else: return (entity, key) def serialize_path(path): if path is None: return None return zip( [m.class_ for m in [path[i] for i in range(0, len(path), 2)]], [path[i] for i in range(1, len(path), 2)] + [None] ) def deserialize_path(path): if path is None: return None p = tuple(chain(*[(mapperutil.class_mapper(cls), key) for cls, key in path])) if p and p[-1] is None: p = p[0:-1] return p class MapperOption(object): """Describe a modification to a Query.""" propagate_to_loaders = False """if True, indicate this option should be carried along Query object generated by scalar or object lazy loaders. """ def process_query(self, query): pass def process_query_conditionally(self, query): """same as process_query(), except that this option may not apply to the given query. 
Used when secondary loaders resend existing options to a new Query.""" self.process_query(query) class PropertyOption(MapperOption): """A MapperOption that is applied to a property off the mapper or one of its child mappers, identified by a dot-separated key or list of class-bound attributes. """ def __init__(self, key, mapper=None): self.key = key self.mapper = mapper def process_query(self, query): self._process(query, True) def process_query_conditionally(self, query): self._process(query, False) def _process(self, query, raiseerr): paths, mappers = self._get_paths(query, raiseerr) if paths: self.process_query_property(query, paths, mappers) def process_query_property(self, query, paths, mappers): pass def __getstate__(self): d = self.__dict__.copy() d['key'] = ret = [] for token in util.to_list(self.key): if isinstance(token, PropComparator): ret.append((token.mapper.class_, token.key)) else: ret.append(token) return d def __setstate__(self, state): ret = [] for key in state['key']: if isinstance(key, tuple): cls, propkey = key ret.append(getattr(cls, propkey)) else: ret.append(key) state['key'] = tuple(ret) self.__dict__ = state def _find_entity_prop_comparator(self, query, token, mapper, raiseerr): if mapperutil._is_aliased_class(mapper): searchfor = mapper isa = False else: searchfor = mapperutil._class_to_mapper(mapper) isa = True for ent in query._mapper_entities: if searchfor is ent.path_entity or isa \ and searchfor.common_parent(ent.path_entity): return ent else: if raiseerr: if not list(query._mapper_entities): raise sa_exc.ArgumentError( "Query has only expression-based entities - " "can't find property named '%s'." % (token, ) ) else: raise sa_exc.ArgumentError( "Can't find property '%s' on any entity " "specified in this Query. Note the full path " "from root (%s) to target entity must be specified." % (token, ",".join(str(x) for x in query._mapper_entities)) ) else: return None def _find_entity_basestring(self, query, token, raiseerr): for ent in query._mapper_entities: # return only the first _MapperEntity when searching # based on string prop name. Ideally object # attributes are used to specify more exactly. return ent else: if raiseerr: raise sa_exc.ArgumentError( "Query has only expression-based entities - " "can't find property named '%s'." % (token, ) ) else: return None def _get_paths(self, query, raiseerr): path = None entity = None l = [] mappers = [] # _current_path implies we're in a # secondary load with an existing path current_path = list(query._current_path) tokens = deque(self.key) while tokens: token = tokens.popleft() if isinstance(token, basestring): # wildcard token if token.endswith(':*'): return [(token,)], [] sub_tokens = token.split(".", 1) token = sub_tokens[0] tokens.extendleft(sub_tokens[1:]) # exhaust current_path before # matching tokens to entities if current_path: if current_path[1] == token: current_path = current_path[2:] continue else: return [], [] if not entity: entity = self._find_entity_basestring( query, token, raiseerr) if entity is None: return [], [] path_element = entity.path_entity mapper = entity.mapper mappers.append(mapper) if hasattr(mapper.class_, token): prop = getattr(mapper.class_, token).property else: if raiseerr: raise sa_exc.ArgumentError( "Can't find property named '%s' on the " "mapped entity %s in this Query. 
" % ( token, mapper) ) else: return [], [] elif isinstance(token, PropComparator): prop = token.property # exhaust current_path before # matching tokens to entities if current_path: if current_path[0:2] == \ [token.parententity, prop.key]: current_path = current_path[2:] continue else: return [], [] if not entity: entity = self._find_entity_prop_comparator( query, prop.key, token.parententity, raiseerr) if not entity: return [], [] path_element = entity.path_entity mapper = entity.mapper mappers.append(prop.parent) else: raise sa_exc.ArgumentError( "mapper option expects " "string key or list of attributes") assert prop is not None if raiseerr and not prop.parent.common_parent(mapper): raise sa_exc.ArgumentError("Attribute '%s' does not " "link from element '%s'" % (token, path_element)) path = build_path(path_element, prop.key, path) l.append(path) if getattr(token, '_of_type', None): path_element = mapper = token._of_type else: path_element = mapper = getattr(prop, 'mapper', None) if mapper is None and tokens: raise sa_exc.ArgumentError( "Attribute '%s' of entity '%s' does not " "refer to a mapped entity" % (token, entity) ) if current_path: # ran out of tokens before # current_path was exhausted. assert not tokens return [], [] return l, mappers class StrategizedOption(PropertyOption): """A MapperOption that affects which LoaderStrategy will be used for an operation by a StrategizedProperty. """ chained = False def process_query_property(self, query, paths, mappers): # _get_context_strategy may receive the path in terms of a base # mapper - e.g. options(eagerload_all(Company.employees, # Engineer.machines)) in the polymorphic tests leads to # "(Person, 'machines')" in the path due to the mechanics of how # the eager strategy builds up the path if self.chained: for path in paths: query._attributes[('loaderstrategy', _reduce_path(path))] = \ self.get_strategy_class() else: query._attributes[('loaderstrategy', _reduce_path(paths[-1]))] = \ self.get_strategy_class() def get_strategy_class(self): raise NotImplementedError() def _reduce_path(path): """Convert a (mapper, path) path to use base mappers. This is used to allow more open ended selection of loader strategies, i.e. Mapper -> prop1 -> Subclass -> prop2, where Subclass is a sub-mapper of the mapper referenced by Mapper.prop1. """ return tuple([i % 2 != 0 and element or getattr(element, 'base_mapper', element) for i, element in enumerate(path)]) class LoaderStrategy(object): """Describe the loading behavior of a StrategizedProperty object. The ``LoaderStrategy`` interacts with the querying process in three ways: * it controls the configuration of the ``InstrumentedAttribute`` placed on a class to handle the behavior of the attribute. this may involve setting up class-level callable functions to fire off a select operation when the attribute is first accessed (i.e. a lazy load) * it processes the ``QueryContext`` at statement construction time, where it can modify the SQL statement that is being produced. Simple column attributes may add their represented column to the list of selected columns, *eager loading* properties may add ``LEFT OUTER JOIN`` clauses to the statement. * It produces "row processor" functions at result fetching time. These "row processor" functions populate a particular attribute on a particular mapped instance. 
""" def __init__(self, parent): self.parent_property = parent self.is_class_level = False self.parent = self.parent_property.parent self.key = self.parent_property.key # TODO: there's no particular reason we need # the separate .init() method at this point. # It's possible someone has written their # own LS object. self.init() def init(self): raise NotImplementedError("LoaderStrategy") def init_class_attribute(self, mapper): pass def setup_query(self, context, entity, path, reduced_path, adapter, **kwargs): pass def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): """Return row processing functions which fulfill the contract specified by MapperProperty.create_row_processor. StrategizedProperty delegates its create_row_processor method directly to this method. """ return None, None, None def __str__(self): return str(self.parent_property) def debug_callable(self, fn, logger, announcement, logfn): if announcement: logger.debug(announcement) if logfn: def call(*args, **kwargs): logger.debug(logfn(*args, **kwargs)) return fn(*args, **kwargs) return call else: return fn class InstrumentationManager(object): """User-defined class instrumentation extension. :class:`.InstrumentationManager` can be subclassed in order to change how class instrumentation proceeds. This class exists for the purposes of integration with other object management frameworks which would like to entirely modify the instrumentation methodology of the ORM, and is not intended for regular usage. For interception of class instrumentation events, see :class:`.InstrumentationEvents`. For an example of :class:`.InstrumentationManager`, see the example :ref:`examples_instrumentation`. The API for this class should be considered as semi-stable, and may change slightly with new releases. """ # r4361 added a mandatory (cls) constructor to this interface. # given that, perhaps class_ should be dropped from all of these # signatures. def __init__(self, class_): pass def manage(self, class_, manager): setattr(class_, '_default_class_manager', manager) def dispose(self, class_, manager): delattr(class_, '_default_class_manager') def manager_getter(self, class_): def get(cls): return cls._default_class_manager return get def instrument_attribute(self, class_, key, inst): pass def post_configure_attribute(self, class_, key, inst): pass def install_descriptor(self, class_, key, inst): setattr(class_, key, inst) def uninstall_descriptor(self, class_, key): delattr(class_, key) def install_member(self, class_, key, implementation): setattr(class_, key, implementation) def uninstall_member(self, class_, key): delattr(class_, key) def instrument_collection_class(self, class_, key, collection_class): global collections if collections is None: from sqlalchemy.orm import collections return collections.prepare_instrumentation(collection_class) def get_instance_dict(self, class_, instance): return instance.__dict__ def initialize_instance_dict(self, class_, instance): pass def install_state(self, class_, instance, state): setattr(instance, '_default_state', state) def remove_state(self, class_, instance): delattr(instance, '_default_state') def state_getter(self, class_): return lambda instance: getattr(instance, '_default_state') def dict_getter(self, class_): return lambda inst: self.get_instance_dict(class_, inst)
mit
-1,921,495,769,978,973,700
32.633461
86
0.587963
false
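The record above dumps sqlalchemy/orm/interfaces.py, whose PropComparator docstring describes attaching user-defined comparators to mapped properties through the comparator_factory argument. A minimal sketch of that hook follows; the table, class, and comparator names are assumptions for illustration and do not come from the dumped file.

# Illustrative sketch, not part of the dumped file: a custom
# ColumnProperty.Comparator wired in via comparator_factory, the mechanism
# described in the PropComparator docstring above. All names are assumed.
from sqlalchemy import Column, Integer, String, Table, MetaData, func
from sqlalchemy.orm import mapper, column_property
from sqlalchemy.orm.properties import ColumnProperty

class CaseInsensitiveComparator(ColumnProperty.Comparator):
    # Lower-case both sides of an equality comparison in the emitted SQL.
    def __eq__(self, other):
        return func.lower(self.__clause_element__()) == func.lower(other)

metadata = MetaData()
users = Table('users', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))

class User(object):
    pass

mapper(User, users, properties={
    'name': column_property(users.c.name,
                            comparator_factory=CaseInsensitiveComparator),
})

# A query such as session.query(User).filter(User.name == 'ALICE') would
# then render roughly as: lower(users.name) = lower(:name_1)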
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/SQLAlchemy-0.9.7-py2.7-linux-x86_64.egg/sqlalchemy/orm/mapper.py
10
108109
# orm/mapper.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Logic to map Python classes to and from selectables. Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central configurational unit which associates a class with a database table. This is a semi-private module; the main configurational API of the ORM is available in :class:`~sqlalchemy.orm.`. """ from __future__ import absolute_import import types import weakref from itertools import chain from collections import deque from .. import sql, util, log, exc as sa_exc, event, schema, inspection from ..sql import expression, visitors, operators, util as sql_util from . import instrumentation, attributes, exc as orm_exc, loading from . import properties from .interfaces import MapperProperty, _InspectionAttr, _MappedAttribute from .base import _class_to_mapper, _state_mapper, class_mapper, \ state_str, _INSTRUMENTOR from .path_registry import PathRegistry import sys _mapper_registry = weakref.WeakKeyDictionary() _already_compiling = False _memoized_configured_property = util.group_expirable_memoized_property() # a constant returned by _get_attr_by_column to indicate # this mapper is not handling an attribute for a particular # column NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE') # lock used to synchronize the "mapper configure" step _CONFIGURE_MUTEX = util.threading.RLock() @inspection._self_inspects @log.class_logger class Mapper(_InspectionAttr): """Define the correlation of class attributes to database table columns. The :class:`.Mapper` object is instantiated using the :func:`~sqlalchemy.orm.mapper` function. For information about instantiating new :class:`.Mapper` objects, see that function's documentation. When :func:`.mapper` is used explicitly to link a user defined class with table metadata, this is referred to as *classical mapping*. Modern SQLAlchemy usage tends to favor the :mod:`sqlalchemy.ext.declarative` extension for class configuration, which makes usage of :func:`.mapper` behind the scenes. Given a particular class known to be mapped by the ORM, the :class:`.Mapper` which maintains it can be acquired using the :func:`.inspect` function:: from sqlalchemy import inspect mapper = inspect(MyClass) A class which was mapped by the :mod:`sqlalchemy.ext.declarative` extension will also have its mapper available via the ``__mapper__`` attribute. """ _new_mappers = False def __init__(self, class_, local_table=None, properties=None, primary_key=None, non_primary=False, inherits=None, inherit_condition=None, inherit_foreign_keys=None, extension=None, order_by=False, always_refresh=False, version_id_col=None, version_id_generator=None, polymorphic_on=None, _polymorphic_map=None, polymorphic_identity=None, concrete=False, with_polymorphic=None, allow_partial_pks=True, batch=True, column_prefix=None, include_properties=None, exclude_properties=None, passive_updates=True, confirm_deleted_rows=True, eager_defaults=False, legacy_is_orphan=False, _compiled_cache_size=100, ): """Return a new :class:`~.Mapper` object. This function is typically used behind the scenes via the Declarative extension. When using Declarative, many of the usual :func:`.mapper` arguments are handled by the Declarative extension itself, including ``class_``, ``local_table``, ``properties``, and ``inherits``. 
Other options are passed to :func:`.mapper` using the ``__mapper_args__`` class variable:: class MyClass(Base): __tablename__ = 'my_table' id = Column(Integer, primary_key=True) type = Column(String(50)) alt = Column("some_alt", Integer) __mapper_args__ = { 'polymorphic_on' : type } Explicit use of :func:`.mapper` is often referred to as *classical mapping*. The above declarative example is equivalent in classical form to:: my_table = Table("my_table", metadata, Column('id', Integer, primary_key=True), Column('type', String(50)), Column("some_alt", Integer) ) class MyClass(object): pass mapper(MyClass, my_table, polymorphic_on=my_table.c.type, properties={ 'alt':my_table.c.some_alt }) .. seealso:: :ref:`classical_mapping` - discussion of direct usage of :func:`.mapper` :param class\_: The class to be mapped. When using Declarative, this argument is automatically passed as the declared class itself. :param local_table: The :class:`.Table` or other selectable to which the class is mapped. May be ``None`` if this mapper inherits from another mapper using single-table inheritance. When using Declarative, this argument is automatically passed by the extension, based on what is configured via the ``__table__`` argument or via the :class:`.Table` produced as a result of the ``__tablename__`` and :class:`.Column` arguments present. :param always_refresh: If True, all query operations for this mapped class will overwrite all data within object instances that already exist within the session, erasing any in-memory changes with whatever information was loaded from the database. Usage of this flag is highly discouraged; as an alternative, see the method :meth:`.Query.populate_existing`. :param allow_partial_pks: Defaults to True. Indicates that a composite primary key with some NULL values should be considered as possibly existing within the database. This affects whether a mapper will assign an incoming row to an existing identity, as well as if :meth:`.Session.merge` will check the database first for a particular primary key value. A "partial primary key" can occur if one has mapped to an OUTER JOIN, for example. :param batch: Defaults to ``True``, indicating that save operations of multiple entities can be batched together for efficiency. Setting to False indicates that an instance will be fully saved before saving the next instance. This is used in the extremely rare case that a :class:`.MapperEvents` listener requires being called in between individual row persistence operations. :param column_prefix: A string which will be prepended to the mapped attribute name when :class:`.Column` objects are automatically assigned as attributes to the mapped class. Does not affect explicitly specified column-based properties. See the section :ref:`column_prefix` for an example. :param concrete: If True, indicates this mapper should use concrete table inheritance with its parent mapper. See the section :ref:`concrete_inheritance` for an example. :param confirm_deleted_rows: defaults to True; when a DELETE occurs of one more rows based on specific primary keys, a warning is emitted when the number of rows matched does not equal the number of rows expected. This parameter may be set to False to handle the case where database ON DELETE CASCADE rules may be deleting some of those rows automatically. The warning may be changed to an exception in a future release. .. versionadded:: 0.9.4 - added :paramref:`.mapper.confirm_deleted_rows` as well as conditional matched row checking on delete. 
:param eager_defaults: if True, the ORM will immediately fetch the value of server-generated default values after an INSERT or UPDATE, rather than leaving them as expired to be fetched on next access. This can be used for event schemes where the server-generated values are needed immediately before the flush completes. By default, this scheme will emit an individual ``SELECT`` statement per row inserted or updated, which note can add significant performance overhead. However, if the target database supports :term:`RETURNING`, the default values will be returned inline with the INSERT or UPDATE statement, which can greatly enhance performance for an application that needs frequent access to just-generated server defaults. .. versionchanged:: 0.9.0 The ``eager_defaults`` option can now make use of :term:`RETURNING` for backends which support it. :param exclude_properties: A list or set of string column names to be excluded from mapping. See :ref:`include_exclude_cols` for an example. :param extension: A :class:`.MapperExtension` instance or list of :class:`.MapperExtension` instances which will be applied to all operations by this :class:`.Mapper`. **Deprecated.** Please see :class:`.MapperEvents`. :param include_properties: An inclusive list or set of string column names to map. See :ref:`include_exclude_cols` for an example. :param inherits: A mapped class or the corresponding :class:`.Mapper` of one indicating a superclass to which this :class:`.Mapper` should *inherit* from. The mapped class here must be a subclass of the other mapper's class. When using Declarative, this argument is passed automatically as a result of the natural class hierarchy of the declared classes. .. seealso:: :ref:`inheritance_toplevel` :param inherit_condition: For joined table inheritance, a SQL expression which will define how the two tables are joined; defaults to a natural join between the two tables. :param inherit_foreign_keys: When ``inherit_condition`` is used and the columns present are missing a :class:`.ForeignKey` configuration, this parameter can be used to specify which columns are "foreign". In most cases can be left as ``None``. :param legacy_is_orphan: Boolean, defaults to ``False``. When ``True``, specifies that "legacy" orphan consideration is to be applied to objects mapped by this mapper, which means that a pending (that is, not persistent) object is auto-expunged from an owning :class:`.Session` only when it is de-associated from *all* parents that specify a ``delete-orphan`` cascade towards this mapper. The new default behavior is that the object is auto-expunged when it is de-associated with *any* of its parents that specify ``delete-orphan`` cascade. This behavior is more consistent with that of a persistent object, and allows behavior to be consistent in more scenarios independently of whether or not an orphanable object has been flushed yet or not. See the change note and example at :ref:`legacy_is_orphan_addition` for more detail on this change. .. versionadded:: 0.8 - the consideration of a pending object as an "orphan" has been modified to more closely match the behavior as that of persistent objects, which is that the object is expunged from the :class:`.Session` as soon as it is de-associated from any of its orphan-enabled parents. Previously, the pending object would be expunged only if de-associated from all of its orphan-enabled parents. The new flag ``legacy_is_orphan`` is added to :func:`.orm.mapper` which re-establishes the legacy behavior. 
:param non_primary: Specify that this :class:`.Mapper` is in addition to the "primary" mapper, that is, the one used for persistence. The :class:`.Mapper` created here may be used for ad-hoc mapping of the class to an alternate selectable, for loading only. :paramref:`.Mapper.non_primary` is not an often used option, but is useful in some specific :func:`.relationship` cases. .. seealso:: :ref:`relationship_non_primary_mapper` :param order_by: A single :class:`.Column` or list of :class:`.Column` objects for which selection operations should use as the default ordering for entities. By default mappers have no pre-defined ordering. :param passive_updates: Indicates UPDATE behavior of foreign key columns when a primary key column changes on a joined-table inheritance mapping. Defaults to ``True``. When True, it is assumed that ON UPDATE CASCADE is configured on the foreign key in the database, and that the database will handle propagation of an UPDATE from a source column to dependent columns on joined-table rows. When False, it is assumed that the database does not enforce referential integrity and will not be issuing its own CASCADE operation for an update. The unit of work process will emit an UPDATE statement for the dependent columns during a primary key change. .. seealso:: :ref:`passive_updates` - description of a similar feature as used with :func:`.relationship` :param polymorphic_on: Specifies the column, attribute, or SQL expression used to determine the target class for an incoming row, when inheriting classes are present. This value is commonly a :class:`.Column` object that's present in the mapped :class:`.Table`:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) discriminator = Column(String(50)) __mapper_args__ = { "polymorphic_on":discriminator, "polymorphic_identity":"employee" } It may also be specified as a SQL expression, as in this example where we use the :func:`.case` construct to provide a conditional approach:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) discriminator = Column(String(50)) __mapper_args__ = { "polymorphic_on":case([ (discriminator == "EN", "engineer"), (discriminator == "MA", "manager"), ], else_="employee"), "polymorphic_identity":"employee" } It may also refer to any attribute configured with :func:`.column_property`, or to the string name of one:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) discriminator = Column(String(50)) employee_type = column_property( case([ (discriminator == "EN", "engineer"), (discriminator == "MA", "manager"), ], else_="employee") ) __mapper_args__ = { "polymorphic_on":employee_type, "polymorphic_identity":"employee" } .. versionchanged:: 0.7.4 ``polymorphic_on`` may be specified as a SQL expression, or refer to any attribute configured with :func:`.column_property`, or to the string name of one. When setting ``polymorphic_on`` to reference an attribute or expression that's not present in the locally mapped :class:`.Table`, yet the value of the discriminator should be persisted to the database, the value of the discriminator is not automatically set on new instances; this must be handled by the user, either through manual means or via event listeners. 
A typical approach to establishing such a listener looks like:: from sqlalchemy import event from sqlalchemy.orm import object_mapper @event.listens_for(Employee, "init", propagate=True) def set_identity(instance, *arg, **kw): mapper = object_mapper(instance) instance.discriminator = mapper.polymorphic_identity Where above, we assign the value of ``polymorphic_identity`` for the mapped class to the ``discriminator`` attribute, thus persisting the value to the ``discriminator`` column in the database. .. seealso:: :ref:`inheritance_toplevel` :param polymorphic_identity: Specifies the value which identifies this particular class as returned by the column expression referred to by the ``polymorphic_on`` setting. As rows are received, the value corresponding to the ``polymorphic_on`` column expression is compared to this value, indicating which subclass should be used for the newly reconstructed object. :param properties: A dictionary mapping the string names of object attributes to :class:`.MapperProperty` instances, which define the persistence behavior of that attribute. Note that :class:`.Column` objects present in the mapped :class:`.Table` are automatically placed into ``ColumnProperty`` instances upon mapping, unless overridden. When using Declarative, this argument is passed automatically, based on all those :class:`.MapperProperty` instances declared in the declared class body. :param primary_key: A list of :class:`.Column` objects which define the primary key to be used against this mapper's selectable unit. This is normally simply the primary key of the ``local_table``, but can be overridden here. :param version_id_col: A :class:`.Column` that will be used to keep a running version id of rows in the table. This is used to detect concurrent updates or the presence of stale data in a flush. The methodology is to detect if an UPDATE statement does not match the last known version id, a :class:`~sqlalchemy.orm.exc.StaleDataError` exception is thrown. By default, the column must be of :class:`.Integer` type, unless ``version_id_generator`` specifies an alternative version generator. .. seealso:: :ref:`mapper_version_counter` - discussion of version counting and rationale. :param version_id_generator: Define how new version ids should be generated. Defaults to ``None``, which indicates that a simple integer counting scheme be employed. To provide a custom versioning scheme, provide a callable function of the form:: def generate_version(version): return next_version Alternatively, server-side versioning functions such as triggers, or programmatic versioning schemes outside of the version id generator may be used, by specifying the value ``False``. Please see :ref:`server_side_version_counter` for a discussion of important points when using this option. .. versionadded:: 0.9.0 ``version_id_generator`` supports server-side version number generation. .. seealso:: :ref:`custom_version_counter` :ref:`server_side_version_counter` :param with_polymorphic: A tuple in the form ``(<classes>, <selectable>)`` indicating the default style of "polymorphic" loading, that is, which tables are queried at once. <classes> is any single or list of mappers and/or classes indicating the inherited classes that should be loaded at once. The special value ``'*'`` may be used to indicate all descending classes should be loaded immediately. The second tuple argument <selectable> indicates a selectable that will be used to query for multiple classes. .. 
seealso:: :ref:`with_polymorphic` - discussion of polymorphic querying techniques. """ self.class_ = util.assert_arg_type(class_, type, 'class_') self.class_manager = None self._primary_key_argument = util.to_list(primary_key) self.non_primary = non_primary if order_by is not False: self.order_by = util.to_list(order_by) else: self.order_by = order_by self.always_refresh = always_refresh if isinstance(version_id_col, MapperProperty): self.version_id_prop = version_id_col self.version_id_col = None else: self.version_id_col = version_id_col if version_id_generator is False: self.version_id_generator = False elif version_id_generator is None: self.version_id_generator = lambda x: (x or 0) + 1 else: self.version_id_generator = version_id_generator self.concrete = concrete self.single = False self.inherits = inherits self.local_table = local_table self.inherit_condition = inherit_condition self.inherit_foreign_keys = inherit_foreign_keys self._init_properties = properties or {} self._delete_orphans = [] self.batch = batch self.eager_defaults = eager_defaults self.column_prefix = column_prefix self.polymorphic_on = expression._clause_element_as_expr( polymorphic_on) self._dependency_processors = [] self.validators = util.immutabledict() self.passive_updates = passive_updates self.legacy_is_orphan = legacy_is_orphan self._clause_adapter = None self._requires_row_aliasing = False self._inherits_equated_pairs = None self._memoized_values = {} self._compiled_cache_size = _compiled_cache_size self._reconstructor = None self._deprecated_extensions = util.to_list(extension or []) self.allow_partial_pks = allow_partial_pks if self.inherits and not self.concrete: self.confirm_deleted_rows = False else: self.confirm_deleted_rows = confirm_deleted_rows self._set_with_polymorphic(with_polymorphic) if isinstance(self.local_table, expression.SelectBase): raise sa_exc.InvalidRequestError( "When mapping against a select() construct, map against " "an alias() of the construct instead." "This because several databases don't allow a " "SELECT from a subquery that does not have an alias." ) if self.with_polymorphic and \ isinstance(self.with_polymorphic[1], expression.SelectBase): self.with_polymorphic = (self.with_polymorphic[0], self.with_polymorphic[1].alias()) # our 'polymorphic identity', a string name that when located in a # result set row indicates this Mapper should be used to construct # the object instance for that row. self.polymorphic_identity = polymorphic_identity # a dictionary of 'polymorphic identity' names, associating those # names with Mappers that will be used to construct object instances # upon a select operation. 
if _polymorphic_map is None: self.polymorphic_map = {} else: self.polymorphic_map = _polymorphic_map if include_properties is not None: self.include_properties = util.to_set(include_properties) else: self.include_properties = None if exclude_properties: self.exclude_properties = util.to_set(exclude_properties) else: self.exclude_properties = None self.configured = False # prevent this mapper from being constructed # while a configure_mappers() is occurring (and defer a # configure_mappers() until construction succeeds) _CONFIGURE_MUTEX.acquire() try: self.dispatch._events._new_mapper_instance(class_, self) self._configure_inheritance() self._configure_legacy_instrument_class() self._configure_class_instrumentation() self._configure_listeners() self._configure_properties() self._configure_polymorphic_setter() self._configure_pks() Mapper._new_mappers = True self._log("constructed") self._expire_memoizations() finally: _CONFIGURE_MUTEX.release() # major attributes initialized at the classlevel so that # they can be Sphinx-documented. is_mapper = True """Part of the inspection API.""" @property def mapper(self): """Part of the inspection API. Returns self. """ return self @property def entity(self): """Part of the inspection API. Returns self.class\_. """ return self.class_ local_table = None """The :class:`.Selectable` which this :class:`.Mapper` manages. Typically is an instance of :class:`.Table` or :class:`.Alias`. May also be ``None``. The "local" table is the selectable that the :class:`.Mapper` is directly responsible for managing from an attribute access and flush perspective. For non-inheriting mappers, the local table is the same as the "mapped" table. For joined-table inheritance mappers, local_table will be the particular sub-table of the overall "join" which this :class:`.Mapper` represents. If this mapper is a single-table inheriting mapper, local_table will be ``None``. .. seealso:: :attr:`~.Mapper.mapped_table`. """ mapped_table = None """The :class:`.Selectable` to which this :class:`.Mapper` is mapped. Typically an instance of :class:`.Table`, :class:`.Join`, or :class:`.Alias`. The "mapped" table is the selectable that the mapper selects from during queries. For non-inheriting mappers, the mapped table is the same as the "local" table. For joined-table inheritance mappers, mapped_table references the full :class:`.Join` representing full rows for this particular subclass. For single-table inheritance mappers, mapped_table references the base table. .. seealso:: :attr:`~.Mapper.local_table`. """ inherits = None """References the :class:`.Mapper` which this :class:`.Mapper` inherits from, if any. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ configured = None """Represent ``True`` if this :class:`.Mapper` has been configured. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. .. seealso:: :func:`.configure_mappers`. """ concrete = None """Represent ``True`` if this :class:`.Mapper` is a concrete inheritance mapper. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ tables = None """An iterable containing the collection of :class:`.Table` objects which this :class:`.Mapper` is aware of. 
If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias` representing a :class:`.Select`, the individual :class:`.Table` objects that comprise the full construct will be represented here. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ primary_key = None """An iterable containing the collection of :class:`.Column` objects which comprise the 'primary key' of the mapped table, from the perspective of this :class:`.Mapper`. This list is against the selectable in :attr:`~.Mapper.mapped_table`. In the case of inheriting mappers, some columns may be managed by a superclass mapper. For example, in the case of a :class:`.Join`, the primary key is determined by all of the primary key columns across all tables referenced by the :class:`.Join`. The list is also not necessarily the same as the primary key column collection associated with the underlying tables; the :class:`.Mapper` features a ``primary_key`` argument that can override what the :class:`.Mapper` considers as primary key columns. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ class_ = None """The Python class which this :class:`.Mapper` maps. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ class_manager = None """The :class:`.ClassManager` which maintains event listeners and class-bound descriptors for this :class:`.Mapper`. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ single = None """Represent ``True`` if this :class:`.Mapper` is a single table inheritance mapper. :attr:`~.Mapper.local_table` will be ``None`` if this flag is set. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ non_primary = None """Represent ``True`` if this :class:`.Mapper` is a "non-primary" mapper, e.g. a mapper that is used only to selet rows but not for persistence management. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ polymorphic_on = None """The :class:`.Column` or SQL expression specified as the ``polymorphic_on`` argument for this :class:`.Mapper`, within an inheritance scenario. This attribute is normally a :class:`.Column` instance but may also be an expression, such as one derived from :func:`.cast`. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ polymorphic_map = None """A mapping of "polymorphic identity" identifiers mapped to :class:`.Mapper` instances, within an inheritance scenario. The identifiers can be of any type which is comparable to the type of column represented by :attr:`~.Mapper.polymorphic_on`. An inheritance chain of mappers will all reference the same polymorphic map object. The object is used to correlate incoming result rows to target mappers. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ polymorphic_identity = None """Represent an identifier which is matched against the :attr:`~.Mapper.polymorphic_on` column during result row loading. Used only with inheritance, this object can be of any type which is comparable to the type of column represented by :attr:`~.Mapper.polymorphic_on`. This is a *read only* attribute determined during mapper construction. 
Behavior is undefined if directly modified. """ base_mapper = None """The base-most :class:`.Mapper` in an inheritance chain. In a non-inheriting scenario, this attribute will always be this :class:`.Mapper`. In an inheritance scenario, it references the :class:`.Mapper` which is parent to all other :class:`.Mapper` objects in the inheritance chain. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ columns = None """A collection of :class:`.Column` or other scalar expression objects maintained by this :class:`.Mapper`. The collection behaves the same as that of the ``c`` attribute on any :class:`.Table` object, except that only those columns included in this mapping are present, and are keyed based on the attribute name defined in the mapping, not necessarily the ``key`` attribute of the :class:`.Column` itself. Additionally, scalar expressions mapped by :func:`.column_property` are also present here. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ validators = None """An immutable dictionary of attributes which have been decorated using the :func:`~.orm.validates` decorator. The dictionary contains string attribute names as keys mapped to the actual validation method. """ c = None """A synonym for :attr:`~.Mapper.columns`.""" @util.memoized_property def _path_registry(self): return PathRegistry.per_mapper(self) def _configure_inheritance(self): """Configure settings related to inherting and/or inherited mappers being present.""" # a set of all mappers which inherit from this one. self._inheriting_mappers = util.WeakSequence() if self.inherits: if isinstance(self.inherits, type): self.inherits = class_mapper(self.inherits, configure=False) if not issubclass(self.class_, self.inherits.class_): raise sa_exc.ArgumentError( "Class '%s' does not inherit from '%s'" % (self.class_.__name__, self.inherits.class_.__name__)) if self.non_primary != self.inherits.non_primary: np = not self.non_primary and "primary" or "non-primary" raise sa_exc.ArgumentError( "Inheritance of %s mapper for class '%s' is " "only allowed from a %s mapper" % (np, self.class_.__name__, np)) # inherit_condition is optional. 
if self.local_table is None: self.local_table = self.inherits.local_table self.mapped_table = self.inherits.mapped_table self.single = True elif self.local_table is not self.inherits.local_table: if self.concrete: self.mapped_table = self.local_table for mapper in self.iterate_to_root(): if mapper.polymorphic_on is not None: mapper._requires_row_aliasing = True else: if self.inherit_condition is None: # figure out inherit condition from our table to the # immediate table of the inherited mapper, not its # full table which could pull in other stuff we don't # want (allows test/inheritance.InheritTest4 to pass) self.inherit_condition = sql_util.join_condition( self.inherits.local_table, self.local_table) self.mapped_table = sql.join( self.inherits.mapped_table, self.local_table, self.inherit_condition) fks = util.to_set(self.inherit_foreign_keys) self._inherits_equated_pairs = \ sql_util.criterion_as_pairs( self.mapped_table.onclause, consider_as_foreign_keys=fks) else: self.mapped_table = self.local_table if self.polymorphic_identity is not None and not self.concrete: self._identity_class = self.inherits._identity_class else: self._identity_class = self.class_ if self.version_id_col is None: self.version_id_col = self.inherits.version_id_col self.version_id_generator = self.inherits.version_id_generator elif self.inherits.version_id_col is not None and \ self.version_id_col is not self.inherits.version_id_col: util.warn( "Inheriting version_id_col '%s' does not match inherited " "version_id_col '%s' and will not automatically populate " "the inherited versioning column. " "version_id_col should only be specified on " "the base-most mapper that includes versioning." % (self.version_id_col.description, self.inherits.version_id_col.description) ) if self.order_by is False and \ not self.concrete and \ self.inherits.order_by is not False: self.order_by = self.inherits.order_by self.polymorphic_map = self.inherits.polymorphic_map self.batch = self.inherits.batch self.inherits._inheriting_mappers.append(self) self.base_mapper = self.inherits.base_mapper self.passive_updates = self.inherits.passive_updates self._all_tables = self.inherits._all_tables if self.polymorphic_identity is not None: self.polymorphic_map[self.polymorphic_identity] = self else: self._all_tables = set() self.base_mapper = self self.mapped_table = self.local_table if self.polymorphic_identity is not None: self.polymorphic_map[self.polymorphic_identity] = self self._identity_class = self.class_ if self.mapped_table is None: raise sa_exc.ArgumentError( "Mapper '%s' does not have a mapped_table specified." % self) def _set_with_polymorphic(self, with_polymorphic): if with_polymorphic == '*': self.with_polymorphic = ('*', None) elif isinstance(with_polymorphic, (tuple, list)): if isinstance( with_polymorphic[0], util.string_types + (tuple, list)): self.with_polymorphic = with_polymorphic else: self.with_polymorphic = (with_polymorphic, None) elif with_polymorphic is not None: raise sa_exc.ArgumentError("Invalid setting for with_polymorphic") else: self.with_polymorphic = None if isinstance(self.local_table, expression.SelectBase): raise sa_exc.InvalidRequestError( "When mapping against a select() construct, map against " "an alias() of the construct instead." "This because several databases don't allow a " "SELECT from a subquery that does not have an alias." 
) if self.with_polymorphic and \ isinstance(self.with_polymorphic[1], expression.SelectBase): self.with_polymorphic = (self.with_polymorphic[0], self.with_polymorphic[1].alias()) if self.configured: self._expire_memoizations() def _set_concrete_base(self, mapper): """Set the given :class:`.Mapper` as the 'inherits' for this :class:`.Mapper`, assuming this :class:`.Mapper` is concrete and does not already have an inherits.""" assert self.concrete assert not self.inherits assert isinstance(mapper, Mapper) self.inherits = mapper self.inherits.polymorphic_map.update(self.polymorphic_map) self.polymorphic_map = self.inherits.polymorphic_map for mapper in self.iterate_to_root(): if mapper.polymorphic_on is not None: mapper._requires_row_aliasing = True self.batch = self.inherits.batch for mp in self.self_and_descendants: mp.base_mapper = self.inherits.base_mapper self.inherits._inheriting_mappers.append(self) self.passive_updates = self.inherits.passive_updates self._all_tables = self.inherits._all_tables for key, prop in mapper._props.items(): if key not in self._props and \ not self._should_exclude(key, key, local=False, column=None): self._adapt_inherited_property(key, prop, False) def _set_polymorphic_on(self, polymorphic_on): self.polymorphic_on = polymorphic_on self._configure_polymorphic_setter(True) def _configure_legacy_instrument_class(self): if self.inherits: self.dispatch._update(self.inherits.dispatch) super_extensions = set( chain(*[m._deprecated_extensions for m in self.inherits.iterate_to_root()])) else: super_extensions = set() for ext in self._deprecated_extensions: if ext not in super_extensions: ext._adapt_instrument_class(self, ext) def _configure_listeners(self): if self.inherits: super_extensions = set( chain(*[m._deprecated_extensions for m in self.inherits.iterate_to_root()])) else: super_extensions = set() for ext in self._deprecated_extensions: if ext not in super_extensions: ext._adapt_listener(self, ext) def _configure_class_instrumentation(self): """If this mapper is to be a primary mapper (i.e. the non_primary flag is not set), associate this Mapper with the given class_ and entity name. Subsequent calls to ``class_mapper()`` for the class_/entity name combination will return this mapper. Also decorate the `__init__` method on the mapped class to include optional auto-session attachment logic. """ manager = attributes.manager_of_class(self.class_) if self.non_primary: if not manager or not manager.is_mapped: raise sa_exc.InvalidRequestError( "Class %s has no primary mapper configured. Configure " "a primary mapper first before setting up a non primary " "Mapper." % self.class_) self.class_manager = manager self._identity_class = manager.mapper._identity_class _mapper_registry[self] = True return if manager is not None: assert manager.class_ is self.class_ if manager.is_mapped: raise sa_exc.ArgumentError( "Class '%s' already has a primary mapper defined. " "Use non_primary=True to " "create a non primary Mapper. clear_mappers() will " "remove *all* current mappers from all classes." % self.class_) # else: # a ClassManager may already exist as # ClassManager.instrument_attribute() creates # new managers for each subclass if they don't yet exist. 
_mapper_registry[self] = True self.dispatch.instrument_class(self, self.class_) if manager is None: manager = instrumentation.register_class(self.class_) self.class_manager = manager manager.mapper = self manager.deferred_scalar_loader = util.partial( loading.load_scalar_attributes, self) # The remaining members can be added by any mapper, # e_name None or not. if manager.info.get(_INSTRUMENTOR, False): return event.listen(manager, 'first_init', _event_on_first_init, raw=True) event.listen(manager, 'init', _event_on_init, raw=True) event.listen(manager, 'resurrect', _event_on_resurrect, raw=True) for key, method in util.iterate_attributes(self.class_): if isinstance(method, types.FunctionType): if hasattr(method, '__sa_reconstructor__'): self._reconstructor = method event.listen(manager, 'load', _event_on_load, raw=True) elif hasattr(method, '__sa_validators__'): validation_opts = method.__sa_validation_opts__ for name in method.__sa_validators__: self.validators = self.validators.union( {name: (method, validation_opts)} ) manager.info[_INSTRUMENTOR] = self @classmethod def _configure_all(cls): """Class-level path to the :func:`.configure_mappers` call. """ configure_mappers() def dispose(self): # Disable any attribute-based compilation. self.configured = True if hasattr(self, '_configure_failed'): del self._configure_failed if not self.non_primary and \ self.class_manager is not None and \ self.class_manager.is_mapped and \ self.class_manager.mapper is self: instrumentation.unregister_class(self.class_) def _configure_pks(self): self.tables = sql_util.find_tables(self.mapped_table) self._pks_by_table = {} self._cols_by_table = {} all_cols = util.column_set(chain(*[ col.proxy_set for col in self._columntoproperty])) pk_cols = util.column_set(c for c in all_cols if c.primary_key) # identify primary key columns which are also mapped by this mapper. tables = set(self.tables + [self.mapped_table]) self._all_tables.update(tables) for t in tables: if t.primary_key and pk_cols.issuperset(t.primary_key): # ordering is important since it determines the ordering of # mapper.primary_key (and therefore query.get()) self._pks_by_table[t] = \ util.ordered_column_set(t.primary_key).\ intersection(pk_cols) self._cols_by_table[t] = \ util.ordered_column_set(t.c).\ intersection(all_cols) # determine cols that aren't expressed within our tables; mark these # as "read only" properties which are refreshed upon INSERT/UPDATE self._readonly_props = set( self._columntoproperty[col] for col in self._columntoproperty if not hasattr(col, 'table') or col.table not in self._cols_by_table) # if explicit PK argument sent, add those columns to the # primary key mappings if self._primary_key_argument: for k in self._primary_key_argument: if k.table not in self._pks_by_table: self._pks_by_table[k.table] = util.OrderedSet() self._pks_by_table[k.table].add(k) # otherwise, see that we got a full PK for the mapped table elif self.mapped_table not in self._pks_by_table or \ len(self._pks_by_table[self.mapped_table]) == 0: raise sa_exc.ArgumentError( "Mapper %s could not assemble any primary " "key columns for mapped table '%s'" % (self, self.mapped_table.description)) elif self.local_table not in self._pks_by_table and \ isinstance(self.local_table, schema.Table): util.warn("Could not assemble any primary " "keys for locally mapped table '%s' - " "no rows will be persisted in this Table." 
% self.local_table.description) if self.inherits and \ not self.concrete and \ not self._primary_key_argument: # if inheriting, the "primary key" for this mapper is # that of the inheriting (unless concrete or explicit) self.primary_key = self.inherits.primary_key else: # determine primary key from argument or mapped_table pks - # reduce to the minimal set of columns if self._primary_key_argument: primary_key = sql_util.reduce_columns( [self.mapped_table.corresponding_column(c) for c in self._primary_key_argument], ignore_nonexistent_tables=True) else: primary_key = sql_util.reduce_columns( self._pks_by_table[self.mapped_table], ignore_nonexistent_tables=True) if len(primary_key) == 0: raise sa_exc.ArgumentError( "Mapper %s could not assemble any primary " "key columns for mapped table '%s'" % (self, self.mapped_table.description)) self.primary_key = tuple(primary_key) self._log("Identified primary key columns: %s", primary_key) def _configure_properties(self): # Column and other ClauseElement objects which are mapped self.columns = self.c = util.OrderedProperties() # object attribute names mapped to MapperProperty objects self._props = util.OrderedDict() # table columns mapped to lists of MapperProperty objects # using a list allows a single column to be defined as # populating multiple object attributes self._columntoproperty = _ColumnMapping(self) # load custom properties if self._init_properties: for key, prop in self._init_properties.items(): self._configure_property(key, prop, False) # pull properties from the inherited mapper if any. if self.inherits: for key, prop in self.inherits._props.items(): if key not in self._props and \ not self._should_exclude(key, key, local=False, column=None): self._adapt_inherited_property(key, prop, False) # create properties for each column in the mapped table, # for those columns which don't already map to a property for column in self.mapped_table.columns: if column in self._columntoproperty: continue column_key = (self.column_prefix or '') + column.key if self._should_exclude( column.key, column_key, local=self.local_table.c.contains_column(column), column=column ): continue # adjust the "key" used for this column to that # of the inheriting mapper for mapper in self.iterate_to_root(): if column in mapper._columntoproperty: column_key = mapper._columntoproperty[column].key self._configure_property(column_key, column, init=False, setparent=True) def _configure_polymorphic_setter(self, init=False): """Configure an attribute on the mapper representing the 'polymorphic_on' column, if applicable, and not already generated by _configure_properties (which is typical). Also create a setter function which will assign this attribute to the value of the 'polymorphic_identity' upon instance construction, also if applicable. This routine will run when an instance is created. """ setter = False if self.polymorphic_on is not None: setter = True if isinstance(self.polymorphic_on, util.string_types): # polymorphic_on specified as a string - link # it to mapped ColumnProperty try: self.polymorphic_on = self._props[self.polymorphic_on] except KeyError: raise sa_exc.ArgumentError( "Can't determine polymorphic_on " "value '%s' - no attribute is " "mapped to this name." 
% self.polymorphic_on) if self.polymorphic_on in self._columntoproperty: # polymorphic_on is a column that is already mapped # to a ColumnProperty prop = self._columntoproperty[self.polymorphic_on] polymorphic_key = prop.key self.polymorphic_on = prop.columns[0] polymorphic_key = prop.key elif isinstance(self.polymorphic_on, MapperProperty): # polymorphic_on is directly a MapperProperty, # ensure it's a ColumnProperty if not isinstance(self.polymorphic_on, properties.ColumnProperty): raise sa_exc.ArgumentError( "Only direct column-mapped " "property or SQL expression " "can be passed for polymorphic_on") prop = self.polymorphic_on self.polymorphic_on = prop.columns[0] polymorphic_key = prop.key elif not expression._is_column(self.polymorphic_on): # polymorphic_on is not a Column and not a ColumnProperty; # not supported right now. raise sa_exc.ArgumentError( "Only direct column-mapped " "property or SQL expression " "can be passed for polymorphic_on" ) else: # polymorphic_on is a Column or SQL expression and # doesn't appear to be mapped. this means it can be 1. # only present in the with_polymorphic selectable or # 2. a totally standalone SQL expression which we'd # hope is compatible with this mapper's mapped_table col = self.mapped_table.corresponding_column( self.polymorphic_on) if col is None: # polymorphic_on doesn't derive from any # column/expression isn't present in the mapped # table. we will make a "hidden" ColumnProperty # for it. Just check that if it's directly a # schema.Column and we have with_polymorphic, it's # likely a user error if the schema.Column isn't # represented somehow in either mapped_table or # with_polymorphic. Otherwise as of 0.7.4 we # just go with it and assume the user wants it # that way (i.e. a CASE statement) setter = False instrument = False col = self.polymorphic_on if isinstance(col, schema.Column) and ( self.with_polymorphic is None or self.with_polymorphic[1]. corresponding_column(col) is None): raise sa_exc.InvalidRequestError( "Could not map polymorphic_on column " "'%s' to the mapped table - polymorphic " "loads will not function properly" % col.description) else: # column/expression that polymorphic_on derives from # is present in our mapped table # and is probably mapped, but polymorphic_on itself # is not. This happens when # the polymorphic_on is only directly present in the # with_polymorphic selectable, as when use # polymorphic_union. # we'll make a separate ColumnProperty for it. instrument = True key = getattr(col, 'key', None) if key: if self._should_exclude(col.key, col.key, False, col): raise sa_exc.InvalidRequestError( "Cannot exclude or override the " "discriminator column %r" % col.key) else: self.polymorphic_on = col = \ col.label("_sa_polymorphic_on") key = col.key self._configure_property( key, properties.ColumnProperty(col, _instrument=instrument), init=init, setparent=True) polymorphic_key = key else: # no polymorphic_on was set. # check inheriting mappers for one. for mapper in self.iterate_to_root(): # determine if polymorphic_on of the parent # should be propagated here. If the col # is present in our mapped table, or if our mapped # table is the same as the parent (i.e. 
single table # inheritance), we can use it if mapper.polymorphic_on is not None: if self.mapped_table is mapper.mapped_table: self.polymorphic_on = mapper.polymorphic_on else: self.polymorphic_on = \ self.mapped_table.corresponding_column( mapper.polymorphic_on) # we can use the parent mapper's _set_polymorphic_identity # directly; it ensures the polymorphic_identity of the # instance's mapper is used so is portable to subclasses. if self.polymorphic_on is not None: self._set_polymorphic_identity = \ mapper._set_polymorphic_identity self._validate_polymorphic_identity = \ mapper._validate_polymorphic_identity else: self._set_polymorphic_identity = None return if setter: def _set_polymorphic_identity(state): dict_ = state.dict state.get_impl(polymorphic_key).set( state, dict_, state.manager.mapper.polymorphic_identity, None) def _validate_polymorphic_identity(mapper, state, dict_): if polymorphic_key in dict_ and \ dict_[polymorphic_key] not in \ mapper._acceptable_polymorphic_identities: util.warn( "Flushing object %s with " "incompatible polymorphic identity %r; the " "object may not refresh and/or load correctly" % ( state_str(state), dict_[polymorphic_key] ) ) self._set_polymorphic_identity = _set_polymorphic_identity self._validate_polymorphic_identity = \ _validate_polymorphic_identity else: self._set_polymorphic_identity = None _validate_polymorphic_identity = None @_memoized_configured_property def _version_id_prop(self): if self.version_id_col is not None: return self._columntoproperty[self.version_id_col] else: return None @_memoized_configured_property def _acceptable_polymorphic_identities(self): identities = set() stack = deque([self]) while stack: item = stack.popleft() if item.mapped_table is self.mapped_table: identities.add(item.polymorphic_identity) stack.extend(item._inheriting_mappers) return identities def _adapt_inherited_property(self, key, prop, init): if not self.concrete: self._configure_property(key, prop, init=False, setparent=False) elif key not in self._props: self._configure_property( key, properties.ConcreteInheritedProperty(), init=init, setparent=True) def _configure_property(self, key, prop, init=True, setparent=True): self._log("_configure_property(%s, %s)", key, prop.__class__.__name__) if not isinstance(prop, MapperProperty): prop = self._property_from_column(key, prop) if isinstance(prop, properties.ColumnProperty): col = self.mapped_table.corresponding_column(prop.columns[0]) # if the column is not present in the mapped table, # test if a column has been added after the fact to the # parent table (or their parent, etc.) [ticket:1570] if col is None and self.inherits: path = [self] for m in self.inherits.iterate_to_root(): col = m.local_table.corresponding_column(prop.columns[0]) if col is not None: for m2 in path: m2.mapped_table._reset_exported() col = self.mapped_table.corresponding_column( prop.columns[0]) break path.append(m) # subquery expression, column not present in the mapped # selectable. 
if col is None: col = prop.columns[0] # column is coming in after _readonly_props was # initialized; check for 'readonly' if hasattr(self, '_readonly_props') and \ (not hasattr(col, 'table') or col.table not in self._cols_by_table): self._readonly_props.add(prop) else: # if column is coming in after _cols_by_table was # initialized, ensure the col is in the right set if hasattr(self, '_cols_by_table') and \ col.table in self._cols_by_table and \ col not in self._cols_by_table[col.table]: self._cols_by_table[col.table].add(col) # if this properties.ColumnProperty represents the "polymorphic # discriminator" column, mark it. We'll need this when rendering # columns in SELECT statements. if not hasattr(prop, '_is_polymorphic_discriminator'): prop._is_polymorphic_discriminator = \ (col is self.polymorphic_on or prop.columns[0] is self.polymorphic_on) self.columns[key] = col for col in prop.columns + prop._orig_columns: for col in col.proxy_set: self._columntoproperty[col] = prop prop.key = key if setparent: prop.set_parent(self, init) if key in self._props and \ getattr(self._props[key], '_mapped_by_synonym', False): syn = self._props[key]._mapped_by_synonym raise sa_exc.ArgumentError( "Can't call map_column=True for synonym %r=%r, " "a ColumnProperty already exists keyed to the name " "%r for column %r" % (syn, key, key, syn) ) if key in self._props and \ not isinstance(prop, properties.ColumnProperty) and \ not isinstance(self._props[key], properties.ColumnProperty): util.warn("Property %s on %s being replaced with new " "property %s; the old property will be discarded" % ( self._props[key], self, prop, )) self._props[key] = prop if not self.non_primary: prop.instrument_class(self) for mapper in self._inheriting_mappers: mapper._adapt_inherited_property(key, prop, init) if init: prop.init() prop.post_instrument_class(self) if self.configured: self._expire_memoizations() def _property_from_column(self, key, prop): """generate/update a :class:`.ColumnProprerty` given a :class:`.Column` object. """ # we were passed a Column or a list of Columns; # generate a properties.ColumnProperty columns = util.to_list(prop) column = columns[0] if not expression._is_column(column): raise sa_exc.ArgumentError( "%s=%r is not an instance of MapperProperty or Column" % (key, prop)) prop = self._props.get(key, None) if isinstance(prop, properties.ColumnProperty): if ( not self._inherits_equated_pairs or (prop.columns[0], column) not in self._inherits_equated_pairs ) and \ not prop.columns[0].shares_lineage(column) and \ prop.columns[0] is not self.version_id_col and \ column is not self.version_id_col: warn_only = prop.parent is not self msg = ("Implicitly combining column %s with column " "%s under attribute '%s'. Please configure one " "or more attributes for these same-named columns " "explicitly." % (prop.columns[-1], column, key)) if warn_only: util.warn(msg) else: raise sa_exc.InvalidRequestError(msg) # existing properties.ColumnProperty from an inheriting # mapper. 
make a copy and append our column to it prop = prop.copy() prop.columns.insert(0, column) self._log("inserting column to existing list " "in properties.ColumnProperty %s" % (key)) return prop elif prop is None or isinstance(prop, properties.ConcreteInheritedProperty): mapped_column = [] for c in columns: mc = self.mapped_table.corresponding_column(c) if mc is None: mc = self.local_table.corresponding_column(c) if mc is not None: # if the column is in the local table but not the # mapped table, this corresponds to adding a # column after the fact to the local table. # [ticket:1523] self.mapped_table._reset_exported() mc = self.mapped_table.corresponding_column(c) if mc is None: raise sa_exc.ArgumentError( "When configuring property '%s' on %s, " "column '%s' is not represented in the mapper's " "table. Use the `column_property()` function to " "force this column to be mapped as a read-only " "attribute." % (key, self, c)) mapped_column.append(mc) return properties.ColumnProperty(*mapped_column) else: raise sa_exc.ArgumentError( "WARNING: when configuring property '%s' on %s, " "column '%s' conflicts with property '%r'. " "To resolve this, map the column to the class under a " "different name in the 'properties' dictionary. Or, " "to remove all awareness of the column entirely " "(including its availability as a foreign key), " "use the 'include_properties' or 'exclude_properties' " "mapper arguments to control specifically which table " "columns get mapped." % (key, self, column.key, prop)) def _post_configure_properties(self): """Call the ``init()`` method on all ``MapperProperties`` attached to this mapper. This is a deferred configuration step which is intended to execute once all mappers have been constructed. """ self._log("_post_configure_properties() started") l = [(key, prop) for key, prop in self._props.items()] for key, prop in l: self._log("initialize prop %s", key) if prop.parent is self and not prop._configure_started: prop.init() if prop._configure_finished: prop.post_instrument_class(self) self._log("_post_configure_properties() complete") self.configured = True def add_properties(self, dict_of_properties): """Add the given dictionary of properties to this mapper, using `add_property`. """ for key, value in dict_of_properties.items(): self.add_property(key, value) def add_property(self, key, prop): """Add an individual MapperProperty to this mapper. If the mapper has not been configured yet, just adds the property to the initial properties dictionary sent to the constructor. If this Mapper has already been configured, then the given MapperProperty is configured immediately. 
""" self._init_properties[key] = prop self._configure_property(key, prop, init=self.configured) def _expire_memoizations(self): for mapper in self.iterate_to_root(): _memoized_configured_property.expire_instance(mapper) @property def _log_desc(self): return "(" + self.class_.__name__ + \ "|" + \ (self.local_table is not None and self.local_table.description or str(self.local_table)) +\ (self.non_primary and "|non-primary" or "") + ")" def _log(self, msg, *args): self.logger.info( "%s " + msg, *((self._log_desc,) + args) ) def _log_debug(self, msg, *args): self.logger.debug( "%s " + msg, *((self._log_desc,) + args) ) def __repr__(self): return '<Mapper at 0x%x; %s>' % ( id(self), self.class_.__name__) def __str__(self): return "Mapper|%s|%s%s" % ( self.class_.__name__, self.local_table is not None and self.local_table.description or None, self.non_primary and "|non-primary" or "" ) def _is_orphan(self, state): orphan_possible = False for mapper in self.iterate_to_root(): for (key, cls) in mapper._delete_orphans: orphan_possible = True has_parent = attributes.manager_of_class(cls).has_parent( state, key, optimistic=state.has_identity) if self.legacy_is_orphan and has_parent: return False elif not self.legacy_is_orphan and not has_parent: return True if self.legacy_is_orphan: return orphan_possible else: return False def has_property(self, key): return key in self._props def get_property(self, key, _configure_mappers=True): """return a MapperProperty associated with the given key. """ if _configure_mappers and Mapper._new_mappers: configure_mappers() try: return self._props[key] except KeyError: raise sa_exc.InvalidRequestError( "Mapper '%s' has no property '%s'" % (self, key)) def get_property_by_column(self, column): """Given a :class:`.Column` object, return the :class:`.MapperProperty` which maps this column.""" return self._columntoproperty[column] @property def iterate_properties(self): """return an iterator of all MapperProperty objects.""" if Mapper._new_mappers: configure_mappers() return iter(self._props.values()) def _mappers_from_spec(self, spec, selectable): """given a with_polymorphic() argument, return the set of mappers it represents. Trims the list of mappers to just those represented within the given selectable, if present. This helps some more legacy-ish mappings. """ if spec == '*': mappers = list(self.self_and_descendants) elif spec: mappers = set() for m in util.to_list(spec): m = _class_to_mapper(m) if not m.isa(self): raise sa_exc.InvalidRequestError( "%r does not inherit from %r" % (m, self)) if selectable is None: mappers.update(m.iterate_to_root()) else: mappers.add(m) mappers = [m for m in self.self_and_descendants if m in mappers] else: mappers = [] if selectable is not None: tables = set(sql_util.find_tables(selectable, include_aliases=True)) mappers = [m for m in mappers if m.local_table in tables] return mappers def _selectable_from_mappers(self, mappers, innerjoin): """given a list of mappers (assumed to be within this mapper's inheritance hierarchy), construct an outerjoin amongst those mapper's mapped tables. 
""" from_obj = self.mapped_table for m in mappers: if m is self: continue if m.concrete: raise sa_exc.InvalidRequestError( "'with_polymorphic()' requires 'selectable' argument " "when concrete-inheriting mappers are used.") elif not m.single: if innerjoin: from_obj = from_obj.join(m.local_table, m.inherit_condition) else: from_obj = from_obj.outerjoin(m.local_table, m.inherit_condition) return from_obj @_memoized_configured_property def _single_table_criterion(self): if self.single and \ self.inherits and \ self.polymorphic_on is not None: return self.polymorphic_on.in_( m.polymorphic_identity for m in self.self_and_descendants) else: return None @_memoized_configured_property def _with_polymorphic_mappers(self): if Mapper._new_mappers: configure_mappers() if not self.with_polymorphic: return [] return self._mappers_from_spec(*self.with_polymorphic) @_memoized_configured_property def _with_polymorphic_selectable(self): if not self.with_polymorphic: return self.mapped_table spec, selectable = self.with_polymorphic if selectable is not None: return selectable else: return self._selectable_from_mappers( self._mappers_from_spec(spec, selectable), False) with_polymorphic_mappers = _with_polymorphic_mappers """The list of :class:`.Mapper` objects included in the default "polymorphic" query. """ @property def selectable(self): """The :func:`.select` construct this :class:`.Mapper` selects from by default. Normally, this is equivalent to :attr:`.mapped_table`, unless the ``with_polymorphic`` feature is in use, in which case the full "polymorphic" selectable is returned. """ return self._with_polymorphic_selectable def _with_polymorphic_args(self, spec=None, selectable=False, innerjoin=False): if self.with_polymorphic: if not spec: spec = self.with_polymorphic[0] if selectable is False: selectable = self.with_polymorphic[1] elif selectable is False: selectable = None mappers = self._mappers_from_spec(spec, selectable) if selectable is not None: return mappers, selectable else: return mappers, self._selectable_from_mappers(mappers, innerjoin) @_memoized_configured_property def _polymorphic_properties(self): return list(self._iterate_polymorphic_properties( self._with_polymorphic_mappers)) def _iterate_polymorphic_properties(self, mappers=None): """Return an iterator of MapperProperty objects which will render into a SELECT.""" if mappers is None: mappers = self._with_polymorphic_mappers if not mappers: for c in self.iterate_properties: yield c else: # in the polymorphic case, filter out discriminator columns # from other mappers, as these are sometimes dependent on that # mapper's polymorphic selectable (which we don't want rendered) for c in util.unique_list( chain(*[ list(mapper.iterate_properties) for mapper in [self] + mappers ]) ): if getattr(c, '_is_polymorphic_discriminator', False) and \ (self.polymorphic_on is None or c.columns[0] is not self.polymorphic_on): continue yield c @util.memoized_property def attrs(self): """A namespace of all :class:`.MapperProperty` objects associated this mapper. This is an object that provides each property based on its key name. For instance, the mapper for a ``User`` class which has ``User.name`` attribute would provide ``mapper.attrs.name``, which would be the :class:`.ColumnProperty` representing the ``name`` column. The namespace object can also be iterated, which would yield each :class:`.MapperProperty`. 
:class:`.Mapper` has several pre-filtered views of this attribute which limit the types of properties returned, inclding :attr:`.synonyms`, :attr:`.column_attrs`, :attr:`.relationships`, and :attr:`.composites`. .. seealso:: :attr:`.Mapper.all_orm_descriptors` """ if Mapper._new_mappers: configure_mappers() return util.ImmutableProperties(self._props) @util.memoized_property def all_orm_descriptors(self): """A namespace of all :class:`._InspectionAttr` attributes associated with the mapped class. These attributes are in all cases Python :term:`descriptors` associated with the mapped class or its superclasses. This namespace includes attributes that are mapped to the class as well as attributes declared by extension modules. It includes any Python descriptor type that inherits from :class:`._InspectionAttr`. This includes :class:`.QueryableAttribute`, as well as extension types such as :class:`.hybrid_property`, :class:`.hybrid_method` and :class:`.AssociationProxy`. To distinguish between mapped attributes and extension attributes, the attribute :attr:`._InspectionAttr.extension_type` will refer to a constant that distinguishes between different extension types. When dealing with a :class:`.QueryableAttribute`, the :attr:`.QueryableAttribute.property` attribute refers to the :class:`.MapperProperty` property, which is what you get when referring to the collection of mapped properties via :attr:`.Mapper.attrs`. .. versionadded:: 0.8.0 .. seealso:: :attr:`.Mapper.attrs` """ return util.ImmutableProperties( dict(self.class_manager._all_sqla_attributes())) @_memoized_configured_property def synonyms(self): """Return a namespace of all :class:`.SynonymProperty` properties maintained by this :class:`.Mapper`. .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(properties.SynonymProperty) @_memoized_configured_property def column_attrs(self): """Return a namespace of all :class:`.ColumnProperty` properties maintained by this :class:`.Mapper`. .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(properties.ColumnProperty) @_memoized_configured_property def relationships(self): """Return a namespace of all :class:`.RelationshipProperty` properties maintained by this :class:`.Mapper`. .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(properties.RelationshipProperty) @_memoized_configured_property def composites(self): """Return a namespace of all :class:`.CompositeProperty` properties maintained by this :class:`.Mapper`. .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(properties.CompositeProperty) def _filter_properties(self, type_): if Mapper._new_mappers: configure_mappers() return util.ImmutableProperties(util.OrderedDict( (k, v) for k, v in self._props.items() if isinstance(v, type_) )) @_memoized_configured_property def _get_clause(self): """create a "get clause" based on the primary key. this is used by query.get() and many-to-one lazyloads to load this item by primary key. 
""" params = [(primary_key, sql.bindparam(None, type_=primary_key.type)) for primary_key in self.primary_key] return sql.and_(*[k == v for (k, v) in params]), \ util.column_dict(params) @_memoized_configured_property def _equivalent_columns(self): """Create a map of all *equivalent* columns, based on the determination of column pairs that are equated to one another based on inherit condition. This is designed to work with the queries that util.polymorphic_union comes up with, which often don't include the columns from the base table directly (including the subclass table columns only). The resulting structure is a dictionary of columns mapped to lists of equivalent columns, i.e. { tablea.col1: set([tableb.col1, tablec.col1]), tablea.col2: set([tabled.col2]) } """ result = util.column_dict() def visit_binary(binary): if binary.operator == operators.eq: if binary.left in result: result[binary.left].add(binary.right) else: result[binary.left] = util.column_set((binary.right,)) if binary.right in result: result[binary.right].add(binary.left) else: result[binary.right] = util.column_set((binary.left,)) for mapper in self.base_mapper.self_and_descendants: if mapper.inherit_condition is not None: visitors.traverse( mapper.inherit_condition, {}, {'binary': visit_binary}) return result def _is_userland_descriptor(self, obj): if isinstance(obj, (_MappedAttribute, instrumentation.ClassManager, expression.ColumnElement)): return False else: return True def _should_exclude(self, name, assigned_name, local, column): """determine whether a particular property should be implicitly present on the class. This occurs when properties are propagated from an inherited class, or are applied from the columns present in the mapped table. """ # check for class-bound attributes and/or descriptors, # either local or from an inherited class if local: if self.class_.__dict__.get(assigned_name, None) is not None \ and self._is_userland_descriptor( self.class_.__dict__[assigned_name]): return True else: if getattr(self.class_, assigned_name, None) is not None \ and self._is_userland_descriptor( getattr(self.class_, assigned_name)): return True if self.include_properties is not None and \ name not in self.include_properties and \ (column is None or column not in self.include_properties): self._log("not including property %s" % (name)) return True if self.exclude_properties is not None and \ ( name in self.exclude_properties or (column is not None and column in self.exclude_properties) ): self._log("excluding property %s" % (name)) return True return False def common_parent(self, other): """Return true if the given mapper shares a common inherited parent as this mapper.""" return self.base_mapper is other.base_mapper def _canload(self, state, allow_subtypes): s = self.primary_mapper() if self.polymorphic_on is not None or allow_subtypes: return _state_mapper(state).isa(s) else: return _state_mapper(state) is s def isa(self, other): """Return True if the this mapper inherits from the given mapper.""" m = self while m and m is not other: m = m.inherits return bool(m) def iterate_to_root(self): m = self while m: yield m m = m.inherits @_memoized_configured_property def self_and_descendants(self): """The collection including this mapper and all descendant mappers. This includes not just the immediately inheriting mappers but all their inheriting mappers as well. 
""" descendants = [] stack = deque([self]) while stack: item = stack.popleft() descendants.append(item) stack.extend(item._inheriting_mappers) return util.WeakSequence(descendants) def polymorphic_iterator(self): """Iterate through the collection including this mapper and all descendant mappers. This includes not just the immediately inheriting mappers but all their inheriting mappers as well. To iterate through an entire hierarchy, use ``mapper.base_mapper.polymorphic_iterator()``. """ return iter(self.self_and_descendants) def primary_mapper(self): """Return the primary mapper corresponding to this mapper's class key (class).""" return self.class_manager.mapper @property def primary_base_mapper(self): return self.class_manager.mapper.base_mapper def identity_key_from_row(self, row, adapter=None): """Return an identity-map key for use in storing/retrieving an item from the identity map. :param row: A :class:`.RowProxy` instance. The columns which are mapped by this :class:`.Mapper` should be locatable in the row, preferably via the :class:`.Column` object directly (as is the case when a :func:`.select` construct is executed), or via string names of the form ``<tablename>_<colname>``. """ pk_cols = self.primary_key if adapter: pk_cols = [adapter.columns[c] for c in pk_cols] return self._identity_class, \ tuple(row[column] for column in pk_cols) def identity_key_from_primary_key(self, primary_key): """Return an identity-map key for use in storing/retrieving an item from an identity map. :param primary_key: A list of values indicating the identifier. """ return self._identity_class, tuple(primary_key) def identity_key_from_instance(self, instance): """Return the identity key for the given instance, based on its primary key attributes. If the instance's state is expired, calling this method will result in a database check to see if the object has been deleted. If the row no longer exists, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. This value is typically also found on the instance state under the attribute name `key`. """ return self.identity_key_from_primary_key( self.primary_key_from_instance(instance)) def _identity_key_from_state(self, state): dict_ = state.dict manager = state.manager return self._identity_class, tuple([ manager[self._columntoproperty[col].key]. impl.get(state, dict_, attributes.PASSIVE_OFF) for col in self.primary_key ]) def primary_key_from_instance(self, instance): """Return the list of primary key values for the given instance. If the instance's state is expired, calling this method will result in a database check to see if the object has been deleted. If the row no longer exists, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. """ state = attributes.instance_state(instance) return self._primary_key_from_state(state) def _primary_key_from_state(self, state): dict_ = state.dict manager = state.manager return [ manager[self._columntoproperty[col].key]. 
impl.get(state, dict_, attributes.PASSIVE_OFF) for col in self.primary_key ] def _get_state_attr_by_column(self, state, dict_, column, passive=attributes.PASSIVE_OFF): prop = self._columntoproperty[column] return state.manager[prop.key].impl.get(state, dict_, passive=passive) def _set_state_attr_by_column(self, state, dict_, column, value): prop = self._columntoproperty[column] state.manager[prop.key].impl.set(state, dict_, value, None) def _get_committed_attr_by_column(self, obj, column): state = attributes.instance_state(obj) dict_ = attributes.instance_dict(obj) return self._get_committed_state_attr_by_column(state, dict_, column) def _get_committed_state_attr_by_column( self, state, dict_, column, passive=attributes.PASSIVE_OFF): prop = self._columntoproperty[column] return state.manager[prop.key].impl.\ get_committed_value(state, dict_, passive=passive) def _optimized_get_statement(self, state, attribute_names): """assemble a WHERE clause which retrieves a given state by primary key, using a minimized set of tables. Applies to a joined-table inheritance mapper where the requested attribute names are only present on joined tables, not the base table. The WHERE clause attempts to include only those tables to minimize joins. """ props = self._props tables = set(chain( *[sql_util.find_tables(c, check_columns=True) for key in attribute_names for c in props[key].columns] )) if self.base_mapper.local_table in tables: return None class ColumnsNotAvailable(Exception): pass def visit_binary(binary): leftcol = binary.left rightcol = binary.right if leftcol is None or rightcol is None: return if leftcol.table not in tables: leftval = self._get_committed_state_attr_by_column( state, state.dict, leftcol, passive=attributes.PASSIVE_NO_INITIALIZE) if leftval is attributes.PASSIVE_NO_RESULT or leftval is None: raise ColumnsNotAvailable() binary.left = sql.bindparam(None, leftval, type_=binary.right.type) elif rightcol.table not in tables: rightval = self._get_committed_state_attr_by_column( state, state.dict, rightcol, passive=attributes.PASSIVE_NO_INITIALIZE) if rightval is attributes.PASSIVE_NO_RESULT or \ rightval is None: raise ColumnsNotAvailable() binary.right = sql.bindparam(None, rightval, type_=binary.right.type) allconds = [] try: start = False for mapper in reversed(list(self.iterate_to_root())): if mapper.local_table in tables: start = True elif not isinstance(mapper.local_table, expression.TableClause): return None if start and not mapper.single: allconds.append(visitors.cloned_traverse( mapper.inherit_condition, {}, {'binary': visit_binary} ) ) except ColumnsNotAvailable: return None cond = sql.and_(*allconds) cols = [] for key in attribute_names: cols.extend(props[key].columns) return sql.select(cols, cond, use_labels=True) def cascade_iterator(self, type_, state, halt_on=None): """Iterate each element and its mapper in an object graph, for all relationships that meet the given cascade rule. :param type_: The name of the cascade rule (i.e. save-update, delete, etc.) :param state: The lead InstanceState. child items will be processed per the relationships defined for this object's mapper. the return value are object instances; this provides a strong reference so that they don't fall out of scope immediately. 
""" visited_states = set() prp, mpp = object(), object() visitables = deque([(deque(self._props.values()), prp, state, state.dict)]) while visitables: iterator, item_type, parent_state, parent_dict = visitables[-1] if not iterator: visitables.pop() continue if item_type is prp: prop = iterator.popleft() if type_ not in prop.cascade: continue queue = deque(prop.cascade_iterator( type_, parent_state, parent_dict, visited_states, halt_on)) if queue: visitables.append((queue, mpp, None, None)) elif item_type is mpp: instance, instance_mapper, corresponding_state, \ corresponding_dict = iterator.popleft() yield instance, instance_mapper, \ corresponding_state, corresponding_dict visitables.append((deque(instance_mapper._props.values()), prp, corresponding_state, corresponding_dict)) @_memoized_configured_property def _compiled_cache(self): return util.LRUCache(self._compiled_cache_size) @_memoized_configured_property def _sorted_tables(self): table_to_mapper = {} for mapper in self.base_mapper.self_and_descendants: for t in mapper.tables: table_to_mapper.setdefault(t, mapper) extra_dependencies = [] for table, mapper in table_to_mapper.items(): super_ = mapper.inherits if super_: extra_dependencies.extend([ (super_table, table) for super_table in super_.tables ]) def skip(fk): # attempt to skip dependencies that are not # significant to the inheritance chain # for two tables that are related by inheritance. # while that dependency may be important, it's technically # not what we mean to sort on here. parent = table_to_mapper.get(fk.parent.table) dep = table_to_mapper.get(fk.column.table) if parent is not None and \ dep is not None and \ dep is not parent and \ dep.inherit_condition is not None: cols = set(sql_util._find_columns(dep.inherit_condition)) if parent.inherit_condition is not None: cols = cols.union(sql_util._find_columns( parent.inherit_condition)) return fk.parent not in cols and fk.column not in cols else: return fk.parent not in cols return False sorted_ = sql_util.sort_tables(table_to_mapper, skip_fn=skip, extra_dependencies=extra_dependencies) ret = util.OrderedDict() for t in sorted_: ret[t] = table_to_mapper[t] return ret def _memo(self, key, callable_): if key in self._memoized_values: return self._memoized_values[key] else: self._memoized_values[key] = value = callable_() return value @util.memoized_property def _table_to_equated(self): """memoized map of tables to collections of columns to be synchronized upwards to the base mapper.""" result = util.defaultdict(list) for table in self._sorted_tables: cols = set(table.c) for m in self.iterate_to_root(): if m._inherits_equated_pairs and \ cols.intersection( util.reduce(set.union, [l.proxy_set for l, r in m._inherits_equated_pairs]) ): result[table].append((m, m._inherits_equated_pairs)) return result def configure_mappers(): """Initialize the inter-mapper relationships of all mappers that have been constructed thus far. This function can be called any number of times, but in most cases is handled internally. 
""" if not Mapper._new_mappers: return _CONFIGURE_MUTEX.acquire() try: global _already_compiling if _already_compiling: return _already_compiling = True try: # double-check inside mutex if not Mapper._new_mappers: return Mapper.dispatch(Mapper).before_configured() # initialize properties on all mappers # note that _mapper_registry is unordered, which # may randomly conceal/reveal issues related to # the order of mapper compilation for mapper in list(_mapper_registry): if getattr(mapper, '_configure_failed', False): e = sa_exc.InvalidRequestError( "One or more mappers failed to initialize - " "can't proceed with initialization of other " "mappers. Original exception was: %s" % mapper._configure_failed) e._configure_failed = mapper._configure_failed raise e if not mapper.configured: try: mapper._post_configure_properties() mapper._expire_memoizations() mapper.dispatch.mapper_configured( mapper, mapper.class_) except: exc = sys.exc_info()[1] if not hasattr(exc, '_configure_failed'): mapper._configure_failed = exc raise Mapper._new_mappers = False finally: _already_compiling = False finally: _CONFIGURE_MUTEX.release() Mapper.dispatch(Mapper).after_configured() def reconstructor(fn): """Decorate a method as the 'reconstructor' hook. Designates a method as the "reconstructor", an ``__init__``-like method that will be called by the ORM after the instance has been loaded from the database or otherwise reconstituted. The reconstructor will be invoked with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush() operation, so the activity within a reconstructor should be conservative. """ fn.__sa_reconstructor__ = True return fn def validates(*names, **kw): """Decorate a method as a 'validator' for one or more named properties. Designates a method as a validator, a method which receives the name of the attribute as well as a value to be assigned, or in the case of a collection, the value to be added to the collection. The function can then raise validation exceptions to halt the process from continuing (where Python's built-in ``ValueError`` and ``AssertionError`` exceptions are reasonable choices), or can modify or replace the value before proceeding. The function should otherwise return the given value. Note that a validator for a collection **cannot** issue a load of that collection within the validation routine - this usage raises an assertion to avoid recursion overflows. This is a reentrant condition which is not supported. :param \*names: list of attribute names to be validated. :param include_removes: if True, "remove" events will be sent as well - the validation function must accept an additional argument "is_remove" which will be a boolean. .. versionadded:: 0.7.7 :param include_backrefs: defaults to ``True``; if ``False``, the validation function will not emit if the originator is an attribute event related via a backref. This can be used for bi-directional :func:`.validates` usage where only one validator should emit per attribute operation. .. versionadded:: 0.9.0 .. 
seealso:: :ref:`simple_validators` - usage examples for :func:`.validates` """ include_removes = kw.pop('include_removes', False) include_backrefs = kw.pop('include_backrefs', True) def wrap(fn): fn.__sa_validators__ = names fn.__sa_validation_opts__ = { "include_removes": include_removes, "include_backrefs": include_backrefs } return fn return wrap def _event_on_load(state, ctx): instrumenting_mapper = state.manager.info[_INSTRUMENTOR] if instrumenting_mapper._reconstructor: instrumenting_mapper._reconstructor(state.obj()) def _event_on_first_init(manager, cls): """Initial mapper compilation trigger. instrumentation calls this one when InstanceState is first generated, and is needed for legacy mutable attributes to work. """ instrumenting_mapper = manager.info.get(_INSTRUMENTOR) if instrumenting_mapper: if Mapper._new_mappers: configure_mappers() def _event_on_init(state, args, kwargs): """Run init_instance hooks. This also includes mapper compilation, normally not needed here but helps with some piecemeal configuration scenarios (such as in the ORM tutorial). """ instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) if instrumenting_mapper: if Mapper._new_mappers: configure_mappers() if instrumenting_mapper._set_polymorphic_identity: instrumenting_mapper._set_polymorphic_identity(state) def _event_on_resurrect(state): # re-populate the primary key elements # of the dict based on the mapping. instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) if instrumenting_mapper: for col, val in zip(instrumenting_mapper.primary_key, state.key[1]): instrumenting_mapper._set_state_attr_by_column( state, state.dict, col, val) class _ColumnMapping(dict): """Error reporting helper for mapper._columntoproperty.""" def __init__(self, mapper): self.mapper = mapper def __missing__(self, column): prop = self.mapper._props.get(column) if prop: raise orm_exc.UnmappedColumnError( "Column '%s.%s' is not available, due to " "conflicting property '%s':%r" % ( column.table.name, column.name, column.key, prop)) raise orm_exc.UnmappedColumnError( "No column %s is configured on mapper %s..." % (column, self.mapper))
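# --- Editor's note: illustrative usage sketch appended by the editor; it is
# not part of the original mapper.py source. It shows the reconstructor()
# and validates() decorators documented above on a hypothetical User class;
# the class, its columns, and the email rule are assumptions made only for
# this example.
if __name__ == "__main__":
    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import reconstructor, validates

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        email = Column(String)

        @reconstructor
        def init_on_load(self):
            # called by the ORM after a User is loaded from the database,
            # in place of __init__, as described for reconstructor() above
            self._cache = {}

        @validates('email')
        def validate_email(self, key, value):
            # reject obviously malformed addresses before assignment,
            # per the validates() contract documented above
            if '@' not in value:
                raise ValueError("invalid email address: %r" % value)
            return value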
apache-2.0
-347,847,619,742,065,860
38.556897
78
0.582828
false
anbangleo/NlsdeWeb
Python-3.6.0/Lib/email/headerregistry.py
12
20164
"""Representing and manipulating email headers via custom objects. This module provides an implementation of the HeaderRegistry API. The implementation is designed to flexibly follow RFC5322 rules. Eventually HeaderRegistry will be a public API, but it isn't yet, and will probably change some before that happens. """ from types import MappingProxyType from email import utils from email import errors from email import _header_value_parser as parser class Address: def __init__(self, display_name='', username='', domain='', addr_spec=None): """Create an object representing a full email address. An address can have a 'display_name', a 'username', and a 'domain'. In addition to specifying the username and domain separately, they may be specified together by using the addr_spec keyword *instead of* the username and domain keywords. If an addr_spec string is specified it must be properly quoted according to RFC 5322 rules; an error will be raised if it is not. An Address object has display_name, username, domain, and addr_spec attributes, all of which are read-only. The addr_spec and the string value of the object are both quoted according to RFC5322 rules, but without any Content Transfer Encoding. """ # This clause with its potential 'raise' may only happen when an # application program creates an Address object using an addr_spec # keyword. The email library code itself must always supply username # and domain. if addr_spec is not None: if username or domain: raise TypeError("addrspec specified when username and/or " "domain also specified") a_s, rest = parser.get_addr_spec(addr_spec) if rest: raise ValueError("Invalid addr_spec; only '{}' " "could be parsed from '{}'".format( a_s, addr_spec)) if a_s.all_defects: raise a_s.all_defects[0] username = a_s.local_part domain = a_s.domain self._display_name = display_name self._username = username self._domain = domain @property def display_name(self): return self._display_name @property def username(self): return self._username @property def domain(self): return self._domain @property def addr_spec(self): """The addr_spec (username@domain) portion of the address, quoted according to RFC 5322 rules, but with no Content Transfer Encoding. """ nameset = set(self.username) if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS): lp = parser.quote_string(self.username) else: lp = self.username if self.domain: return lp + '@' + self.domain if not lp: return '<>' return lp def __repr__(self): return "{}(display_name={!r}, username={!r}, domain={!r})".format( self.__class__.__name__, self.display_name, self.username, self.domain) def __str__(self): nameset = set(self.display_name) if len(nameset) > len(nameset-parser.SPECIALS): disp = parser.quote_string(self.display_name) else: disp = self.display_name if disp: addr_spec = '' if self.addr_spec=='<>' else self.addr_spec return "{} <{}>".format(disp, addr_spec) return self.addr_spec def __eq__(self, other): if type(other) != type(self): return False return (self.display_name == other.display_name and self.username == other.username and self.domain == other.domain) class Group: def __init__(self, display_name=None, addresses=None): """Create an object representing an address group. An address group consists of a display_name followed by colon and a list of addresses (see Address) terminated by a semi-colon. The Group is created by specifying a display_name and a possibly empty list of Address objects. 
A Group can also be used to represent a single address that is not in a group, which is convenient when manipulating lists that are a combination of Groups and individual Addresses. In this case the display_name should be set to None. In particular, the string representation of a Group whose display_name is None is the same as the Address object, if there is one and only one Address object in the addresses list. """ self._display_name = display_name self._addresses = tuple(addresses) if addresses else tuple() @property def display_name(self): return self._display_name @property def addresses(self): return self._addresses def __repr__(self): return "{}(display_name={!r}, addresses={!r}".format( self.__class__.__name__, self.display_name, self.addresses) def __str__(self): if self.display_name is None and len(self.addresses)==1: return str(self.addresses[0]) disp = self.display_name if disp is not None: nameset = set(disp) if len(nameset) > len(nameset-parser.SPECIALS): disp = parser.quote_string(disp) adrstr = ", ".join(str(x) for x in self.addresses) adrstr = ' ' + adrstr if adrstr else adrstr return "{}:{};".format(disp, adrstr) def __eq__(self, other): if type(other) != type(self): return False return (self.display_name == other.display_name and self.addresses == other.addresses) # Header Classes # class BaseHeader(str): """Base class for message headers. Implements generic behavior and provides tools for subclasses. A subclass must define a classmethod named 'parse' that takes an unfolded value string and a dictionary as its arguments. The dictionary will contain one key, 'defects', initialized to an empty list. After the call the dictionary must contain two additional keys: parse_tree, set to the parse tree obtained from parsing the header, and 'decoded', set to the string value of the idealized representation of the data from the value. (That is, encoded words are decoded, and values that have canonical representations are so represented.) The defects key is intended to collect parsing defects, which the message parser will subsequently dispose of as appropriate. The parser should not, insofar as practical, raise any errors. Defects should be added to the list instead. The standard header parsers register defects for RFC compliance issues, for obsolete RFC syntax, and for unrecoverable parsing errors. The parse method may add additional keys to the dictionary. In this case the subclass must define an 'init' method, which will be passed the dictionary as its keyword arguments. The method should use (usually by setting them as the value of similarly named attributes) and remove all the extra keys added by its parse method, and then use super to call its parent class with the remaining arguments and keywords. The subclass should also make sure that a 'max_count' attribute is defined that is either None or 1. XXX: need to better define this API. 
""" def __new__(cls, name, value): kwds = {'defects': []} cls.parse(value, kwds) if utils._has_surrogates(kwds['decoded']): kwds['decoded'] = utils._sanitize(kwds['decoded']) self = str.__new__(cls, kwds['decoded']) del kwds['decoded'] self.init(name, **kwds) return self def init(self, name, *, parse_tree, defects): self._name = name self._parse_tree = parse_tree self._defects = defects @property def name(self): return self._name @property def defects(self): return tuple(self._defects) def __reduce__(self): return ( _reconstruct_header, ( self.__class__.__name__, self.__class__.__bases__, str(self), ), self.__dict__) @classmethod def _reconstruct(cls, value): return str.__new__(cls, value) def fold(self, *, policy): """Fold header according to policy. The parsed representation of the header is folded according to RFC5322 rules, as modified by the policy. If the parse tree contains surrogateescaped bytes, the bytes are CTE encoded using the charset 'unknown-8bit". Any non-ASCII characters in the parse tree are CTE encoded using charset utf-8. XXX: make this a policy setting. The returned value is an ASCII-only string possibly containing linesep characters, and ending with a linesep character. The string includes the header name and the ': ' separator. """ # At some point we need to only put fws here if it was in the source. header = parser.Header([ parser.HeaderLabel([ parser.ValueTerminal(self.name, 'header-name'), parser.ValueTerminal(':', 'header-sep')]), parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), self._parse_tree]) return header.fold(policy=policy) def _reconstruct_header(cls_name, bases, value): return type(cls_name, bases, {})._reconstruct(value) class UnstructuredHeader: max_count = None value_parser = staticmethod(parser.get_unstructured) @classmethod def parse(cls, value, kwds): kwds['parse_tree'] = cls.value_parser(value) kwds['decoded'] = str(kwds['parse_tree']) class UniqueUnstructuredHeader(UnstructuredHeader): max_count = 1 class DateHeader: """Header whose value consists of a single timestamp. Provides an additional attribute, datetime, which is either an aware datetime using a timezone, or a naive datetime if the timezone in the input string is -0000. Also accepts a datetime as input. The 'value' attribute is the normalized form of the timestamp, which means it is the output of format_datetime on the datetime. """ max_count = None # This is used only for folding, not for creating 'decoded'. value_parser = staticmethod(parser.get_unstructured) @classmethod def parse(cls, value, kwds): if not value: kwds['defects'].append(errors.HeaderMissingRequiredValue()) kwds['datetime'] = None kwds['decoded'] = '' kwds['parse_tree'] = parser.TokenList() return if isinstance(value, str): value = utils.parsedate_to_datetime(value) kwds['datetime'] = value kwds['decoded'] = utils.format_datetime(kwds['datetime']) kwds['parse_tree'] = cls.value_parser(kwds['decoded']) def init(self, *args, **kw): self._datetime = kw.pop('datetime') super().init(*args, **kw) @property def datetime(self): return self._datetime class UniqueDateHeader(DateHeader): max_count = 1 class AddressHeader: max_count = None @staticmethod def value_parser(value): address_list, value = parser.get_address_list(value) assert not value, 'this should not happen' return address_list @classmethod def parse(cls, value, kwds): if isinstance(value, str): # We are translating here from the RFC language (address/mailbox) # to our API language (group/address). 
kwds['parse_tree'] = address_list = cls.value_parser(value) groups = [] for addr in address_list.addresses: groups.append(Group(addr.display_name, [Address(mb.display_name or '', mb.local_part or '', mb.domain or '') for mb in addr.all_mailboxes])) defects = list(address_list.all_defects) else: # Assume it is Address/Group stuff if not hasattr(value, '__iter__'): value = [value] groups = [Group(None, [item]) if not hasattr(item, 'addresses') else item for item in value] defects = [] kwds['groups'] = groups kwds['defects'] = defects kwds['decoded'] = ', '.join([str(item) for item in groups]) if 'parse_tree' not in kwds: kwds['parse_tree'] = cls.value_parser(kwds['decoded']) def init(self, *args, **kw): self._groups = tuple(kw.pop('groups')) self._addresses = None super().init(*args, **kw) @property def groups(self): return self._groups @property def addresses(self): if self._addresses is None: self._addresses = tuple([address for group in self._groups for address in group.addresses]) return self._addresses class UniqueAddressHeader(AddressHeader): max_count = 1 class SingleAddressHeader(AddressHeader): @property def address(self): if len(self.addresses)!=1: raise ValueError(("value of single address header {} is not " "a single address").format(self.name)) return self.addresses[0] class UniqueSingleAddressHeader(SingleAddressHeader): max_count = 1 class MIMEVersionHeader: max_count = 1 value_parser = staticmethod(parser.parse_mime_version) @classmethod def parse(cls, value, kwds): kwds['parse_tree'] = parse_tree = cls.value_parser(value) kwds['decoded'] = str(parse_tree) kwds['defects'].extend(parse_tree.all_defects) kwds['major'] = None if parse_tree.minor is None else parse_tree.major kwds['minor'] = parse_tree.minor if parse_tree.minor is not None: kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor']) else: kwds['version'] = None def init(self, *args, **kw): self._version = kw.pop('version') self._major = kw.pop('major') self._minor = kw.pop('minor') super().init(*args, **kw) @property def major(self): return self._major @property def minor(self): return self._minor @property def version(self): return self._version class ParameterizedMIMEHeader: # Mixin that handles the params dict. Must be subclassed and # a property value_parser for the specific header provided. max_count = 1 @classmethod def parse(cls, value, kwds): kwds['parse_tree'] = parse_tree = cls.value_parser(value) kwds['decoded'] = str(parse_tree) kwds['defects'].extend(parse_tree.all_defects) if parse_tree.params is None: kwds['params'] = {} else: # The MIME RFCs specify that parameter ordering is arbitrary. 
kwds['params'] = {utils._sanitize(name).lower(): utils._sanitize(value) for name, value in parse_tree.params} def init(self, *args, **kw): self._params = kw.pop('params') super().init(*args, **kw) @property def params(self): return MappingProxyType(self._params) class ContentTypeHeader(ParameterizedMIMEHeader): value_parser = staticmethod(parser.parse_content_type_header) def init(self, *args, **kw): super().init(*args, **kw) self._maintype = utils._sanitize(self._parse_tree.maintype) self._subtype = utils._sanitize(self._parse_tree.subtype) @property def maintype(self): return self._maintype @property def subtype(self): return self._subtype @property def content_type(self): return self.maintype + '/' + self.subtype class ContentDispositionHeader(ParameterizedMIMEHeader): value_parser = staticmethod(parser.parse_content_disposition_header) def init(self, *args, **kw): super().init(*args, **kw) cd = self._parse_tree.content_disposition self._content_disposition = cd if cd is None else utils._sanitize(cd) @property def content_disposition(self): return self._content_disposition class ContentTransferEncodingHeader: max_count = 1 value_parser = staticmethod(parser.parse_content_transfer_encoding_header) @classmethod def parse(cls, value, kwds): kwds['parse_tree'] = parse_tree = cls.value_parser(value) kwds['decoded'] = str(parse_tree) kwds['defects'].extend(parse_tree.all_defects) def init(self, *args, **kw): super().init(*args, **kw) self._cte = utils._sanitize(self._parse_tree.cte) @property def cte(self): return self._cte # The header factory # _default_header_map = { 'subject': UniqueUnstructuredHeader, 'date': UniqueDateHeader, 'resent-date': DateHeader, 'orig-date': UniqueDateHeader, 'sender': UniqueSingleAddressHeader, 'resent-sender': SingleAddressHeader, 'to': UniqueAddressHeader, 'resent-to': AddressHeader, 'cc': UniqueAddressHeader, 'resent-cc': AddressHeader, 'bcc': UniqueAddressHeader, 'resent-bcc': AddressHeader, 'from': UniqueAddressHeader, 'resent-from': AddressHeader, 'reply-to': UniqueAddressHeader, 'mime-version': MIMEVersionHeader, 'content-type': ContentTypeHeader, 'content-disposition': ContentDispositionHeader, 'content-transfer-encoding': ContentTransferEncodingHeader, } class HeaderRegistry: """A header_factory and header registry.""" def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader, use_default_map=True): """Create a header_factory that works with the Policy API. base_class is the class that will be the last class in the created header class's __bases__ list. default_class is the class that will be used if "name" (see __call__) does not appear in the registry. use_default_map controls whether or not the default mapping of names to specialized classes is copied in to the registry when the factory is created. The default is True. """ self.registry = {} self.base_class = base_class self.default_class = default_class if use_default_map: self.registry.update(_default_header_map) def map_to_type(self, name, cls): """Register cls as the specialized class for handling "name" headers. """ self.registry[name.lower()] = cls def __getitem__(self, name): cls = self.registry.get(name.lower(), self.default_class) return type('_'+cls.__name__, (cls, self.base_class), {}) def __call__(self, name, value): """Create a header instance for header 'name' from 'value'. 
Creates a header instance by creating a specialized class for parsing and representing the specified header by combining the factory base_class with a specialized class from the registry or the default_class, and passing the name and value to the constructed class's constructor. """ return self[name](name, value)
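# --- Editor's note: illustrative usage sketch appended by the editor; it is
# not part of the original headerregistry.py source. It exercises the
# Address class and the HeaderRegistry factory documented above; the names
# and addresses used are made up for the example.
if __name__ == "__main__":
    registry = HeaderRegistry()
    to = registry('To', 'Fred <fred@example.com>, ann@example.com')
    # the result is a str subclass carrying the parsed representation
    print(to.name)                      # 'To'
    print(to.addresses[0].addr_spec)    # 'fred@example.com'

    addr = Address(display_name='Mary Smith',
                   username='mary', domain='example.org')
    print(str(addr))                    # 'Mary Smith <mary@example.org>'
    print(addr.addr_spec)               # 'mary@example.org'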
mit
-1,601,294,855,012,280,600
33.409556
80
0.602063
false
pamfilos/data.cern.ch
cap/modules/deposit/search.py
5
3516
# -*- coding: utf-8 -*- # # This file is part of CERN Analysis Preservation Framework. # Copyright (C) 2016, 2017 CERN. # # CERN Analysis Preservation Framework is free software; you can redistribute # it and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # CERN Analysis Preservation Framework is distributed in the hope that it will # be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with CERN Analysis Preservation Framework; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Configuration for deposit search.""" from elasticsearch_dsl import Q, TermsFacet from flask import g from flask_login import current_user from flask_principal import RoleNeed from invenio_access.models import Role from invenio_search import RecordsSearch from invenio_search.api import DefaultFilter from cap.modules.access.permissions import admin_permission_factory from cap.modules.access.utils import login_required @login_required def deposits_filter(): """Filter list of deposits. Permit to the user to see all if: * The user is an admin (see func:`invenio_deposit.permissions:admin_permission_factory`). * It's called outside of a request. Otherwise, it filters out any deposit where user is not the owner. """ if admin_permission_factory(None).can(): return Q('term', **{'_deposit.status': 'draft'}) roles = [role.id for role in Role.query.all() if RoleNeed(role) in g.identity.provides] q = (Q('multi_match', query=g.identity.id, fields=[ '_access.deposit-read.users', '_access.deposit-admin.users' ]) | Q('terms', **{'_access.deposit-read.roles': roles}) | Q('terms', **{'_access.deposit-admin.roles': roles}) ) & Q('term', **{'_deposit.status': 'draft'}) return q class CAPDepositSearch(RecordsSearch): """Default search class.""" class Meta: """Configuration for deposit search.""" index = 'deposits' doc_types = None fields = ('*',) facets = { 'status': TermsFacet(field='_deposit.status'), } default_filter = DefaultFilter(deposits_filter) def get_user_deposits(self): """Get draft deposits that current user owns.""" return self.filter( Q('match', **{'_deposit.status': 'draft'}) & Q('multi_match', query=current_user.id, fields=['_deposit.owners']) ) def get_shared_with_user(self): """Get draft deposits shared with current user .""" return self.filter( Q('match', **{'_deposit.status': 'draft'}) & ~ Q('multi_match', query=current_user.id, fields=['_deposit.owners']) ) def sort_by_latest(self): """Sort by latest (updated).""" return self.sort( {'_updated': {'unmapped_type': 'date', 'order': 'desc'}} )
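# --- Editor's note: illustrative usage sketch appended by the editor; it is
# not part of the original module. Inside a Flask/Invenio request context the
# search class above would typically be chained as shown here; the result
# handling is an assumption for the example, and execute() needs a live
# Elasticsearch connection.
def _example_latest_user_drafts():
    search = (CAPDepositSearch()
              .get_user_deposits()      # drafts owned by the current user
              .sort_by_latest())        # most recently updated first
    response = search.execute()
    return [hit.to_dict() for hit in response]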
gpl-2.0
-233,742,865,712,228,480
33.135922
79
0.654721
false
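A minimal sketch of how the elasticsearch_dsl Q composition in deposits_filter above behaves outside a Flask request; the user id and role ids are hypothetical placeholders:

from elasticsearch_dsl import Q

user_id = 42        # hypothetical identity id
role_ids = [3, 17]  # hypothetical role ids the identity provides

q = (Q('multi_match', query=user_id,
       fields=['_access.deposit-read.users', '_access.deposit-admin.users'])
     | Q('terms', **{'_access.deposit-read.roles': role_ids})
     | Q('terms', **{'_access.deposit-admin.roles': role_ids})
     ) & Q('term', **{'_deposit.status': 'draft'})

# A bool query: the three OR-ed ACL clauses combined with the draft-status term.
print(q.to_dict())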
Frankkkkk/arctic
arctic/date/_daterange.py
1
8033
import datetime from six import string_types from ._generalslice import OPEN_OPEN, CLOSED_CLOSED, OPEN_CLOSED, CLOSED_OPEN, GeneralSlice from ._parse import parse INTERVAL_LOOKUP = {(True, True): OPEN_OPEN, (False, False): CLOSED_CLOSED, (True, False): OPEN_CLOSED, (False, True): CLOSED_OPEN } class DateRange(GeneralSlice): """ Represents a bounded datetime range. Ranges may be bounded on either end if a date is specified for the start or end of the range, or unbounded if None is specified for either value. Unbounded ranges will allow all available data to pass through when used as a filter argument on function or method. ===== ==== ============================ =============================== start end interval Meaning ----- ---- ---------------------------- ------------------------------- None None any date a None CLOSED_CLOSED or CLOSED_OPEN date >= a a None OPEN_CLOSED or OPEN_OPEN date > a None b CLOSED_CLOSED or OPEN_CLOSED date <= b None b CLOSED_OPEN or OPEN_OPEN date < b a b CLOSED_CLOSED date >= a and date <= b a b OPEN_CLOSED date > a and date <= b a b CLOSED_OPEN date >= a and date < b a b OPEN_OPEN date > a and date < b ===== ==== ============================ =============================== Parameters ---------- start : `int`, `str` or `datetime.datetime` lower bound date value as an integer, string or datetime object. end : `int`, `str` or `datetime.datetime` upper bound date value as an integer, string or datetime object. interval : `int` CLOSED_CLOSED, OPEN_CLOSED, CLOSED_OPEN or OPEN_OPEN. **Default is CLOSED_CLOSED**. """ def __init__(self, start=None, end=None, interval=CLOSED_CLOSED): def _is_dt_type(x): return isinstance(x, (datetime.datetime, datetime.date)) def _compute_bound(value, desc): if isinstance(value, bytes): return parse(value.decode('ascii')) elif isinstance(value, (int, string_types)): return parse(str(value)) elif _is_dt_type(value): return value elif value is None: return None else: raise TypeError('unsupported type for %s: %s' % (desc, type(value))) super(DateRange, self).__init__(_compute_bound(start, "start"), _compute_bound(end, "end"), 1, interval) if _is_dt_type(self.start) and _is_dt_type(self.end): if self.start > self.end: raise ValueError('start date (%s) cannot be greater than end date (%s)!' % (self.start, self.end)) @property def unbounded(self): """True if range is unbounded on either or both ends, False otherwise.""" return self.start is None or self.end is None def intersection(self, other): """ Create a new DateRange representing the maximal range enclosed by this range and other """ startopen = other.startopen if self.start is None \ else self.startopen if other.start is None \ else other.startopen if self.start < other.start \ else self.startopen if self.start > other.start \ else (self.startopen and other.startopen) endopen = other.endopen if self.end is None \ else self.endopen if other.end is None \ else other.endopen if self.end > other.end \ else self.endopen if self.end < other.end \ else (self.endopen and other.endopen) new_start = self.start if other.start is None \ else other.start if self.start is None \ else max(self.start, other.start) new_end = self.end if other.end is None \ else other.end if self.end is None \ else min(self.end, other.end) interval = INTERVAL_LOOKUP[(startopen, endopen)] return DateRange(new_start, new_end, interval) def as_dates(self): """ Create a new DateRange with the datetimes converted to dates and changing to CLOSED/CLOSED. 
""" new_start = self.start.date() if self.start and isinstance(self.start, datetime.datetime) else self.start new_end = self.end.date() if self.end and isinstance(self.end, datetime.datetime) else self.end return DateRange(new_start, new_end, CLOSED_CLOSED) def mongo_query(self): """ Convert a DateRange into a MongoDb query string. FIXME: Mongo can only handle datetimes in queries, so we should make this handle the case where start/end are datetime.date and extend accordingly (being careful about the interval logic). """ comps = {OPEN_CLOSED: ('t', 'te'), OPEN_OPEN: ('t', 't'), CLOSED_OPEN: ('te', 't'), CLOSED_CLOSED: ('te', 'te')} query = {} comp = comps[self.interval] if self.start: query['$g' + comp[0]] = self.start if self.end: query['$l' + comp[1]] = self.end return query def get_date_bounds(self): """ Return the upper and lower bounds along with operators that are needed to do an 'in range' test. Useful for SQL commands. Returns ------- tuple: (`str`, `date`, `str`, `date`) (date_gt, start, date_lt, end) e.g.: ('>=', start_date, '<', end_date) """ start = end = None date_gt = '>=' date_lt = '<=' if self: if self.start: start = self.start if self.end: end = self.end if self.startopen: date_gt = '>' if self.endopen: date_lt = '<' return date_gt, start, date_lt, end def __contains__(self, d): if self.interval == CLOSED_CLOSED: return (self.start is None or d >= self.start) and (self.end is None or d <= self.end) elif self.interval == CLOSED_OPEN: return (self.start is None or d >= self.start) and (self.end is None or d < self.end) elif self.interval == OPEN_CLOSED: return (self.start is None or d > self.start) and (self.end is None or d <= self.end) return (self.start is None or d > self.start) and (self.end is None or d < self.end) def __repr__(self): return 'DateRange(start=%r, end=%r)' % (self.start, self.end) def __eq__(self, rhs): if rhs is None or not (hasattr(rhs, "end") and hasattr(rhs, "start")): return False return self.end == rhs.end and self.start == rhs.start def __lt__(self, other): if self.start is None: return True if other.start is None: return False return self.start < other.start def __hash__(self): return hash((self.start, self.end, self.step, self.interval)) def __getitem__(self, key): if key == 0: return self.start elif key == 1: return self.end else: raise IndexError('Index %s not in range (0:1)' % key) def __str__(self): return "%s%s, %s%s" % ( "(" if self.startopen else "[", self.start, self.end, ")" if self.endopen else "]", ) def __setstate__(self, state): """Called by pickle, PyYAML etc to set state.""" self.start = state['start'] self.end = state['end'] self.interval = state.get('interval') or CLOSED_CLOSED self.step = 1
lgpl-2.1
1,438,300,447,782,078,700
37.806763
113
0.529068
false
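A short usage sketch for the DateRange class in the arctic record above, assuming arctic.date re-exports DateRange and the interval constants as in the package's public API:

import datetime
from arctic.date import DateRange, CLOSED_OPEN

a = DateRange('2016-01-01', '2016-06-30')        # CLOSED_CLOSED by default
b = DateRange('2016-03-15', None, CLOSED_OPEN)   # unbounded end

overlap = a.intersection(b)
print(overlap)                                   # [2016-03-15 00:00:00, 2016-06-30 00:00:00]
print(overlap.mongo_query())                     # dict with '$gte'/'$lte' bounds
print(datetime.datetime(2016, 4, 1) in overlap)  # True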
y12uc231/edx-platform
lms/djangoapps/courseware/tests/test_access.py
14
16756
import datetime import pytz from django.test import TestCase from django.core.urlresolvers import reverse from mock import Mock, patch from opaque_keys.edx.locations import SlashSeparatedCourseKey import courseware.access as access from courseware.masquerade import CourseMasquerade from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory from courseware.tests.helpers import LoginEnrollmentTestCase from student.tests.factories import AnonymousUserFactory, CourseEnrollmentAllowedFactory, CourseEnrollmentFactory from xmodule.course_module import ( CATALOG_VISIBILITY_CATALOG_AND_ABOUT, CATALOG_VISIBILITY_ABOUT, CATALOG_VISIBILITY_NONE ) from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from util.milestones_helpers import ( set_prerequisite_courses, fulfill_course_milestone, seed_milestone_relationship_types, ) # pylint: disable=missing-docstring # pylint: disable=protected-access class AccessTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase): """ Tests for the various access controls on the student dashboard """ def setUp(self): super(AccessTestCase, self).setUp() course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') self.course = course_key.make_usage_key('course', course_key.run) self.anonymous_user = AnonymousUserFactory() self.student = UserFactory() self.global_staff = UserFactory(is_staff=True) self.course_staff = StaffFactory(course_key=self.course.course_key) self.course_instructor = InstructorFactory(course_key=self.course.course_key) def test_has_access_to_course(self): self.assertFalse(access._has_access_to_course( None, 'staff', self.course.course_key )) self.assertFalse(access._has_access_to_course( self.anonymous_user, 'staff', self.course.course_key )) self.assertFalse(access._has_access_to_course( self.anonymous_user, 'instructor', self.course.course_key )) self.assertTrue(access._has_access_to_course( self.global_staff, 'staff', self.course.course_key )) self.assertTrue(access._has_access_to_course( self.global_staff, 'instructor', self.course.course_key )) # A user has staff access if they are in the staff group self.assertTrue(access._has_access_to_course( self.course_staff, 'staff', self.course.course_key )) self.assertFalse(access._has_access_to_course( self.course_staff, 'instructor', self.course.course_key )) # A user has staff and instructor access if they are in the instructor group self.assertTrue(access._has_access_to_course( self.course_instructor, 'staff', self.course.course_key )) self.assertTrue(access._has_access_to_course( self.course_instructor, 'instructor', self.course.course_key )) # A user does not have staff or instructor access if they are # not in either the staff or the the instructor group self.assertFalse(access._has_access_to_course( self.student, 'staff', self.course.course_key )) self.assertFalse(access._has_access_to_course( self.student, 'instructor', self.course.course_key )) def test__has_access_string(self): user = Mock(is_staff=True) self.assertFalse(access._has_access_string(user, 'staff', 'not_global')) user._has_global_staff_access.return_value = True self.assertTrue(access._has_access_string(user, 'staff', 'global')) self.assertRaises(ValueError, access._has_access_string, user, 'not_staff', 'global') def test__has_access_error_desc(self): descriptor = Mock() self.assertFalse(access._has_access_error_desc(self.student, 'load', descriptor, self.course.course_key)) 
self.assertTrue(access._has_access_error_desc(self.course_staff, 'load', descriptor, self.course.course_key)) self.assertTrue(access._has_access_error_desc(self.course_instructor, 'load', descriptor, self.course.course_key)) self.assertFalse(access._has_access_error_desc(self.student, 'staff', descriptor, self.course.course_key)) self.assertTrue(access._has_access_error_desc(self.course_staff, 'staff', descriptor, self.course.course_key)) self.assertTrue(access._has_access_error_desc(self.course_instructor, 'staff', descriptor, self.course.course_key)) self.assertFalse(access._has_access_error_desc(self.student, 'instructor', descriptor, self.course.course_key)) self.assertFalse(access._has_access_error_desc(self.course_staff, 'instructor', descriptor, self.course.course_key)) self.assertTrue(access._has_access_error_desc(self.course_instructor, 'instructor', descriptor, self.course.course_key)) with self.assertRaises(ValueError): access._has_access_error_desc(self.course_instructor, 'not_load_or_staff', descriptor, self.course.course_key) def test__has_access_descriptor(self): # TODO: override DISABLE_START_DATES and test the start date branch of the method user = Mock() descriptor = Mock(user_partitions=[]) # Always returns true because DISABLE_START_DATES is set in test.py self.assertTrue(access._has_access_descriptor(user, 'load', descriptor)) self.assertTrue(access._has_access_descriptor(user, 'instructor', descriptor)) with self.assertRaises(ValueError): access._has_access_descriptor(user, 'not_load_or_staff', descriptor) @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False}) def test__has_access_descriptor_staff_lock(self): """ Tests that "visible_to_staff_only" overrides start date. """ mock_unit = Mock(user_partitions=[]) mock_unit._class_tags = {} # Needed for detached check in _has_access_descriptor def verify_access(student_should_have_access): """ Verify the expected result from _has_access_descriptor """ self.assertEqual(student_should_have_access, access._has_access_descriptor( self.anonymous_user, 'load', mock_unit, course_key=self.course.course_key) ) # staff always has access self.assertTrue(access._has_access_descriptor( self.course_staff, 'load', mock_unit, course_key=self.course.course_key) ) # No start date, staff lock on mock_unit.visible_to_staff_only = True verify_access(False) # No start date, staff lock off. mock_unit.visible_to_staff_only = False verify_access(True) # Start date in the past, staff lock on. mock_unit.start = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1) mock_unit.visible_to_staff_only = True verify_access(False) # Start date in the past, staff lock off. mock_unit.visible_to_staff_only = False verify_access(True) # Start date in the future, staff lock on. mock_unit.start = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1) # release date in the future mock_unit.visible_to_staff_only = True verify_access(False) # Start date in the future, staff lock off. 
mock_unit.visible_to_staff_only = False verify_access(False) def test__has_access_course_desc_can_enroll(self): yesterday = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=1) tomorrow = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=1) # Non-staff can enroll if authenticated and specifically allowed for that course # even outside the open enrollment period user = UserFactory.create() course = Mock( enrollment_start=tomorrow, enrollment_end=tomorrow, id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='' ) CourseEnrollmentAllowedFactory(email=user.email, course_id=course.id) self.assertTrue(access._has_access_course_desc(user, 'enroll', course)) # Staff can always enroll even outside the open enrollment period user = StaffFactory.create(course_key=course.id) self.assertTrue(access._has_access_course_desc(user, 'enroll', course)) # Non-staff cannot enroll if it is between the start and end dates and invitation only # and not specifically allowed course = Mock( enrollment_start=yesterday, enrollment_end=tomorrow, id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='', invitation_only=True ) user = UserFactory.create() self.assertFalse(access._has_access_course_desc(user, 'enroll', course)) # Non-staff can enroll if it is between the start and end dates and not invitation only course = Mock( enrollment_start=yesterday, enrollment_end=tomorrow, id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='', invitation_only=False ) self.assertTrue(access._has_access_course_desc(user, 'enroll', course)) # Non-staff cannot enroll outside the open enrollment period if not specifically allowed course = Mock( enrollment_start=tomorrow, enrollment_end=tomorrow, id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), enrollment_domain='', invitation_only=False ) self.assertFalse(access._has_access_course_desc(user, 'enroll', course)) def test__user_passed_as_none(self): """Ensure has_access handles a user being passed as null""" access.has_access(None, 'staff', 'global', None) def test__catalog_visibility(self): """ Tests the catalog visibility tri-states """ user = UserFactory.create() course_id = SlashSeparatedCourseKey('edX', 'test', '2012_Fall') staff = StaffFactory.create(course_key=course_id) course = Mock( id=course_id, catalog_visibility=CATALOG_VISIBILITY_CATALOG_AND_ABOUT ) self.assertTrue(access._has_access_course_desc(user, 'see_in_catalog', course)) self.assertTrue(access._has_access_course_desc(user, 'see_about_page', course)) self.assertTrue(access._has_access_course_desc(staff, 'see_in_catalog', course)) self.assertTrue(access._has_access_course_desc(staff, 'see_about_page', course)) # Now set visibility to just about page course = Mock( id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), catalog_visibility=CATALOG_VISIBILITY_ABOUT ) self.assertFalse(access._has_access_course_desc(user, 'see_in_catalog', course)) self.assertTrue(access._has_access_course_desc(user, 'see_about_page', course)) self.assertTrue(access._has_access_course_desc(staff, 'see_in_catalog', course)) self.assertTrue(access._has_access_course_desc(staff, 'see_about_page', course)) # Now set visibility to none, which means neither in catalog nor about pages course = Mock( id=SlashSeparatedCourseKey('edX', 'test', '2012_Fall'), catalog_visibility=CATALOG_VISIBILITY_NONE ) self.assertFalse(access._has_access_course_desc(user, 'see_in_catalog', course)) self.assertFalse(access._has_access_course_desc(user, 'see_about_page', course)) 
self.assertTrue(access._has_access_course_desc(staff, 'see_in_catalog', course)) self.assertTrue(access._has_access_course_desc(staff, 'see_about_page', course)) @patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True}) def test_access_on_course_with_pre_requisites(self): """ Test course access when a course has pre-requisite course yet to be completed """ seed_milestone_relationship_types() user = UserFactory.create() pre_requisite_course = CourseFactory.create( org='test_org', number='788', run='test_run' ) pre_requisite_courses = [unicode(pre_requisite_course.id)] course = CourseFactory.create( org='test_org', number='786', run='test_run', pre_requisite_courses=pre_requisite_courses ) set_prerequisite_courses(course.id, pre_requisite_courses) #user should not be able to load course even if enrolled CourseEnrollmentFactory(user=user, course_id=course.id) self.assertFalse(access._has_access_course_desc(user, 'view_courseware_with_prerequisites', course)) # Staff can always access course staff = StaffFactory.create(course_key=course.id) self.assertTrue(access._has_access_course_desc(staff, 'view_courseware_with_prerequisites', course)) # User should be able access after completing required course fulfill_course_milestone(pre_requisite_course.id, user) self.assertTrue(access._has_access_course_desc(user, 'view_courseware_with_prerequisites', course)) @patch.dict("django.conf.settings.FEATURES", {'ENABLE_PREREQUISITE_COURSES': True, 'MILESTONES_APP': True}) def test_courseware_page_unfulfilled_prereqs(self): """ Test courseware access when a course has pre-requisite course yet to be completed """ seed_milestone_relationship_types() pre_requisite_course = CourseFactory.create( org='edX', course='900', run='test_run', ) pre_requisite_courses = [unicode(pre_requisite_course.id)] course = CourseFactory.create( org='edX', course='1000', run='test_run', pre_requisite_courses=pre_requisite_courses, ) set_prerequisite_courses(course.id, pre_requisite_courses) test_password = 't3stp4ss.!' user = UserFactory.create() user.set_password(test_password) user.save() self.login(user.email, test_password) CourseEnrollmentFactory(user=user, course_id=course.id) url = reverse('courseware', args=[unicode(course.id)]) response = self.client.get(url) self.assertRedirects( response, reverse( 'dashboard' ) ) self.assertEqual(response.status_code, 302) fulfill_course_milestone(pre_requisite_course.id, user) response = self.client.get(url) self.assertEqual(response.status_code, 200) class UserRoleTestCase(TestCase): """ Tests for user roles. """ def setUp(self): super(UserRoleTestCase, self).setUp() self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') self.anonymous_user = AnonymousUserFactory() self.student = UserFactory() self.global_staff = UserFactory(is_staff=True) self.course_staff = StaffFactory(course_key=self.course_key) self.course_instructor = InstructorFactory(course_key=self.course_key) def _install_masquerade(self, user, role='student'): """ Installs a masquerade for the specified user. 
""" user.masquerade_settings = { self.course_key: CourseMasquerade(self.course_key, role=role) } def test_user_role_staff(self): """Ensure that user role is student for staff masqueraded as student.""" self.assertEqual( 'staff', access.get_user_role(self.course_staff, self.course_key) ) # Masquerade staff self._install_masquerade(self.course_staff) self.assertEqual( 'student', access.get_user_role(self.course_staff, self.course_key) ) def test_user_role_instructor(self): """Ensure that user role is student for instructor masqueraded as student.""" self.assertEqual( 'instructor', access.get_user_role(self.course_instructor, self.course_key) ) # Masquerade instructor self._install_masquerade(self.course_instructor) self.assertEqual( 'student', access.get_user_role(self.course_instructor, self.course_key) ) def test_user_role_anonymous(self): """Ensure that user role is student for anonymous user.""" self.assertEqual( 'student', access.get_user_role(self.anonymous_user, self.course_key) )
agpl-3.0
-5,003,193,154,016,205,000
42.979003
128
0.657257
false
bryx-inc/boto
tests/unit/ec2/test_blockdevicemapping.py
111
6739
from tests.compat import unittest from boto.ec2.connection import EC2Connection from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping from tests.compat import OrderedDict from tests.unit import AWSMockServiceTestCase class BlockDeviceTypeTests(unittest.TestCase): def setUp(self): self.block_device_type = BlockDeviceType() def check_that_attribute_has_been_set(self, name, value, attribute): self.block_device_type.endElement(name, value, None) self.assertEqual(getattr(self.block_device_type, attribute), value) def test_endElement_sets_correct_attributes_with_values(self): for arguments in [("volumeId", 1, "volume_id"), ("virtualName", "some name", "ephemeral_name"), ("snapshotId", 1, "snapshot_id"), ("volumeSize", 1, "size"), ("status", "some status", "status"), ("attachTime", 1, "attach_time"), ("somethingRandom", "somethingRandom", "somethingRandom")]: self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2]) def test_endElement_with_name_NoDevice_value_true(self): self.block_device_type.endElement("NoDevice", 'true', None) self.assertEqual(self.block_device_type.no_device, True) def test_endElement_with_name_NoDevice_value_other(self): self.block_device_type.endElement("NoDevice", 'something else', None) self.assertEqual(self.block_device_type.no_device, False) def test_endElement_with_name_deleteOnTermination_value_true(self): self.block_device_type.endElement("deleteOnTermination", "true", None) self.assertEqual(self.block_device_type.delete_on_termination, True) def test_endElement_with_name_deleteOnTermination_value_other(self): self.block_device_type.endElement("deleteOnTermination", 'something else', None) self.assertEqual(self.block_device_type.delete_on_termination, False) def test_endElement_with_name_encrypted_value_true(self): self.block_device_type.endElement("Encrypted", "true", None) self.assertEqual(self.block_device_type.encrypted, True) def test_endElement_with_name_Encrypted_value_other(self): self.block_device_type.endElement("Encrypted", 'something else', None) self.assertEqual(self.block_device_type.encrypted, False) class BlockDeviceMappingTests(unittest.TestCase): def setUp(self): self.block_device_mapping = BlockDeviceMapping() def block_device_type_eq(self, b1, b2): if isinstance(b1, BlockDeviceType) and isinstance(b2, BlockDeviceType): return all([b1.connection == b2.connection, b1.ephemeral_name == b2.ephemeral_name, b1.no_device == b2.no_device, b1.volume_id == b2.volume_id, b1.snapshot_id == b2.snapshot_id, b1.status == b2.status, b1.attach_time == b2.attach_time, b1.delete_on_termination == b2.delete_on_termination, b1.size == b2.size, b1.encrypted == b2.encrypted]) def test_startElement_with_name_ebs_sets_and_returns_current_value(self): retval = self.block_device_mapping.startElement("ebs", None, None) assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping)) def test_startElement_with_name_virtualName_sets_and_returns_current_value(self): retval = self.block_device_mapping.startElement("virtualName", None, None) assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping)) def test_endElement_with_name_device_sets_current_name_dev_null(self): self.block_device_mapping.endElement("device", "/dev/null", None) self.assertEqual(self.block_device_mapping.current_name, "/dev/null") def test_endElement_with_name_device_sets_current_name(self): self.block_device_mapping.endElement("deviceName", "some device name", None) 
self.assertEqual(self.block_device_mapping.current_name, "some device name") def test_endElement_with_name_item_sets_current_name_key_to_current_value(self): self.block_device_mapping.current_name = "some name" self.block_device_mapping.current_value = "some value" self.block_device_mapping.endElement("item", "some item", None) self.assertEqual(self.block_device_mapping["some name"], "some value") class TestLaunchConfiguration(AWSMockServiceTestCase): connection_class = EC2Connection def default_body(self): # This is a dummy response return b""" <DescribeLaunchConfigurationsResponse> </DescribeLaunchConfigurationsResponse> """ def test_run_instances_block_device_mapping(self): # Same as the test in ``unit/ec2/autoscale/test_group.py:TestLaunchConfiguration``, # but with modified request parameters (due to a mismatch between EC2 & # Autoscaling). self.set_http_response(status_code=200) dev_sdf = BlockDeviceType(snapshot_id='snap-12345') dev_sdg = BlockDeviceType(snapshot_id='snap-12346', delete_on_termination=True, encrypted=True) class OrderedBlockDeviceMapping(OrderedDict, BlockDeviceMapping): pass bdm = OrderedBlockDeviceMapping() bdm.update(OrderedDict((('/dev/sdf', dev_sdf), ('/dev/sdg', dev_sdg)))) response = self.service_connection.run_instances( image_id='123456', instance_type='m1.large', security_groups=['group1', 'group2'], block_device_map=bdm ) self.assert_request_parameters({ 'Action': 'RunInstances', 'BlockDeviceMapping.1.DeviceName': '/dev/sdf', 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false', 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345', 'BlockDeviceMapping.2.DeviceName': '/dev/sdg', 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'true', 'BlockDeviceMapping.2.Ebs.SnapshotId': 'snap-12346', 'BlockDeviceMapping.2.Ebs.Encrypted': 'true', 'ImageId': '123456', 'InstanceType': 'm1.large', 'MaxCount': 1, 'MinCount': 1, 'SecurityGroup.1': 'group1', 'SecurityGroup.2': 'group2', }, ignore_params_values=[ 'Version', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Timestamp' ]) if __name__ == "__main__": unittest.main()
mit
-2,947,765,186,996,055,000
45.157534
103
0.644606
false
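Outside the unit test in the record above, the same mapping is assembled before launching instances. A hedged sketch; the region and AMI id are made-up placeholders and the live calls are left commented out:

from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping

root = BlockDeviceType(size=50, delete_on_termination=True)
scratch = BlockDeviceType(ephemeral_name='ephemeral0')

bdm = BlockDeviceMapping()  # behaves like a dict keyed by device name
bdm['/dev/sda1'] = root
bdm['/dev/sdb'] = scratch

# import boto.ec2
# conn = boto.ec2.connect_to_region('us-east-1')
# conn.run_instances('ami-12345678', instance_type='m1.large', block_device_map=bdm)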
rahul67/hue
apps/zookeeper/src/zookeeper/forms.py
38
1216
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django import forms from django.forms.widgets import Textarea, HiddenInput class CreateZNodeForm(forms.Form): name = forms.CharField(max_length=64) data = forms.CharField(required=False, widget=Textarea) sequence = forms.BooleanField(required=False) class EditZNodeForm(forms.Form): data = forms.CharField(required=False, widget=Textarea) version = forms.IntegerField(required=False, widget=HiddenInput)
apache-2.0
-3,322,291,735,640,719,000
38.225806
74
0.775493
false
alex/changes
migrations/versions/1c5907e309f1_add_failurereason.py
4
1593
"""Add FailureReason Revision ID: 1c5907e309f1 Revises: 4a12e7f0159d Create Date: 2014-06-02 15:31:02.991394 """ # revision identifiers, used by Alembic. revision = '1c5907e309f1' down_revision = '4a12e7f0159d' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table( 'failurereason', sa.Column('id', sa.GUID(), nullable=False), sa.Column('step_id', sa.GUID(), nullable=False), sa.Column('job_id', sa.GUID(), nullable=False), sa.Column('build_id', sa.GUID(), nullable=False), sa.Column('project_id', sa.GUID(), nullable=False), sa.Column('reason', sa.String(length=32), nullable=False), sa.Column('date_created', sa.DateTime(), nullable=False), sa.ForeignKeyConstraint(['build_id'], ['build.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['job_id'], ['job.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['project_id'], ['project.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['step_id'], ['jobstep.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('step_id', 'reason', name='unq_failurereason_key') ) op.create_index('idx_failurereason_build_id', 'failurereason', ['build_id'], unique=False) op.create_index('idx_failurereason_job_id', 'failurereason', ['job_id'], unique=False) op.create_index('idx_failurereason_project_id', 'failurereason', ['project_id'], unique=False) def downgrade(): op.drop_table('failurereason')
apache-2.0
7,627,041,655,368,663,000
37.853659
98
0.657878
false
azunite/chrome_build
depot-tools-auth.py
27
3275
#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Manages cached OAuth2 tokens used by other depot_tools scripts. Usage: depot-tools-auth login codereview.chromium.org depot-tools-auth info codereview.chromium.org depot-tools-auth logout codereview.chromium.org """ import logging import optparse import sys from third_party import colorama import auth import subcommand __version__ = '1.0' @subcommand.usage('<hostname>') def CMDlogin(parser, args): """Performs interactive login and caches authentication token.""" # Forcefully relogin, revoking previous token. hostname, authenticator = parser.parse_args(args) authenticator.logout() authenticator.login() print_token_info(hostname, authenticator) return 0 @subcommand.usage('<hostname>') def CMDlogout(parser, args): """Revokes cached authentication token and removes it from disk.""" _, authenticator = parser.parse_args(args) done = authenticator.logout() print 'Done.' if done else 'Already logged out.' return 0 @subcommand.usage('<hostname>') def CMDinfo(parser, args): """Shows email associated with a cached authentication token.""" # If no token is cached, AuthenticationError will be caught in 'main'. hostname, authenticator = parser.parse_args(args) print_token_info(hostname, authenticator) return 0 def print_token_info(hostname, authenticator): token_info = authenticator.get_token_info() print 'Logged in to %s as %s.' % (hostname, token_info['email']) print '' print 'To login with a different email run:' print ' depot-tools-auth login %s' % hostname print 'To logout and purge the authentication token run:' print ' depot-tools-auth logout %s' % hostname class OptionParser(optparse.OptionParser): def __init__(self, *args, **kwargs): optparse.OptionParser.__init__( self, *args, prog='depot-tools-auth', version=__version__, **kwargs) self.add_option( '-v', '--verbose', action='count', default=0, help='Use 2 times for more debugging info') auth.add_auth_options(self, auth.make_auth_config(use_oauth2=True)) def parse_args(self, args=None, values=None): """Parses options and returns (hostname, auth.Authenticator object).""" options, args = optparse.OptionParser.parse_args(self, args, values) levels = [logging.WARNING, logging.INFO, logging.DEBUG] logging.basicConfig(level=levels[min(options.verbose, len(levels) - 1)]) auth_config = auth.extract_auth_config_from_options(options) if len(args) != 1: self.error('Expecting single argument (hostname).') if not auth_config.use_oauth2: self.error('This command is only usable with OAuth2 authentication') return args[0], auth.get_authenticator_for_host(args[0], auth_config) def main(argv): dispatcher = subcommand.CommandDispatcher(__name__) try: return dispatcher.execute(OptionParser(), argv) except auth.AuthenticationError as e: print >> sys.stderr, e return 1 if __name__ == '__main__': colorama.init() try: sys.exit(main(sys.argv[1:])) except KeyboardInterrupt: sys.stderr.write('interrupted\n') sys.exit(1)
bsd-3-clause
-262,266,611,635,079,140
31.107843
76
0.712672
false
Gchorba/Ask
lib/python2.7/site-packages/werkzeug/wsgi.py
147
37837
# -*- coding: utf-8 -*- """ werkzeug.wsgi ~~~~~~~~~~~~~ This module implements WSGI related helpers. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import re import os import sys import posixpath import mimetypes from itertools import chain from zlib import adler32 from time import time, mktime from datetime import datetime from functools import partial, update_wrapper from werkzeug._compat import iteritems, text_type, string_types, \ implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \ wsgi_get_bytes, try_coerce_native, PY2 from werkzeug._internal import _empty_stream, _encode_idna from werkzeug.http import is_resource_modified, http_date from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join def responder(f): """Marks a function as responder. Decorate a function with it and it will automatically call the return value as WSGI application. Example:: @responder def application(environ, start_response): return Response('Hello World!') """ return update_wrapper(lambda *a: f(*a)(*a[-2:]), f) def get_current_url(environ, root_only=False, strip_querystring=False, host_only=False, trusted_hosts=None): """A handy helper function that recreates the full URL as IRI for the current request or parts of it. Here an example: >>> from werkzeug.test import create_environ >>> env = create_environ("/?param=foo", "http://localhost/script") >>> get_current_url(env) 'http://localhost/script/?param=foo' >>> get_current_url(env, root_only=True) 'http://localhost/script/' >>> get_current_url(env, host_only=True) 'http://localhost/' >>> get_current_url(env, strip_querystring=True) 'http://localhost/script/' This optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. Note that the string returned might contain unicode characters as the representation is an IRI not an URI. If you need an ASCII only representation you can use the :func:`~werkzeug.urls.iri_to_uri` function: >>> from werkzeug.urls import iri_to_uri >>> iri_to_uri(get_current_url(env)) 'http://localhost/script/?param=foo' :param environ: the WSGI environment to get the current URL from. :param root_only: set `True` if you only want the root URL. :param strip_querystring: set to `True` if you don't want the querystring. :param host_only: set to `True` if the host URL should be returned. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information. """ tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)] cat = tmp.append if host_only: return uri_to_iri(''.join(tmp) + '/') cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/')) cat('/') if not root_only: cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/'))) if not strip_querystring: qs = get_query_string(environ) if qs: cat('?' + qs) return uri_to_iri(''.join(tmp)) def host_is_trusted(hostname, trusted_list): """Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well. 
""" if not hostname: return False if isinstance(trusted_list, string_types): trusted_list = [trusted_list] def _normalize(hostname): if ':' in hostname: hostname = hostname.rsplit(':', 1)[0] return _encode_idna(hostname) hostname = _normalize(hostname) for ref in trusted_list: if ref.startswith('.'): ref = ref[1:] suffix_match = True else: suffix_match = False ref = _normalize(ref) if ref == hostname: return True if suffix_match and hostname.endswith('.' + ref): return True return False def get_host(environ, trusted_hosts=None): """Return the real host for the given WSGI environment. This first checks the `X-Forwarded-Host` header, then the normal `Host` header, and finally the `SERVER_NAME` environment variable (using the first one it finds). Optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. :param environ: the WSGI environment to get the host of. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information. """ if 'HTTP_X_FORWARDED_HOST' in environ: rv = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip() elif 'HTTP_HOST' in environ: rv = environ['HTTP_HOST'] else: rv = environ['SERVER_NAME'] if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \ in (('https', '443'), ('http', '80')): rv += ':' + environ['SERVER_PORT'] if trusted_hosts is not None: if not host_is_trusted(rv, trusted_hosts): from werkzeug.exceptions import SecurityError raise SecurityError('Host "%s" is not trusted' % rv) return rv def get_content_length(environ): """Returns the content length from the WSGI environment as integer. If it's not available `None` is returned. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the content length from. """ content_length = environ.get('CONTENT_LENGTH') if content_length is not None: try: return max(0, int(content_length)) except (ValueError, TypeError): pass def get_input_stream(environ, safe_fallback=True): """Returns the input stream from the WSGI environment and wraps it in the most sensible way possible. The stream returned is not the raw WSGI stream in most cases but one that is safe to read from without taking into account the content length. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the stream from. :param safe: indicates weather the function should use an empty stream as safe fallback or just return the original WSGI input stream if it can't wrap it safely. The default is to return an empty string in those cases. """ stream = environ['wsgi.input'] content_length = get_content_length(environ) # A wsgi extension that tells us if the input is terminated. In # that case we return the stream unchanged as we know we can savely # read it until the end. if environ.get('wsgi.input_terminated'): return stream # If we don't have a content length we fall back to an empty stream # in case of a safe fallback, otherwise we return the stream unchanged. # The non-safe fallback is not recommended but might be useful in # some situations. if content_length is None: return safe_fallback and _empty_stream or stream # Otherwise limit the stream to the content length return LimitedStream(stream, content_length) def get_query_string(environ): """Returns the `QUERY_STRING` from the WSGI environment. This also takes care about the WSGI decoding dance on Python 3 environments as a native string. The string returned will be restricted to ASCII characters. .. 
versionadded:: 0.9 :param environ: the WSGI environment object to get the query string from. """ qs = wsgi_get_bytes(environ.get('QUERY_STRING', '')) # QUERY_STRING really should be ascii safe but some browsers # will send us some unicode stuff (I am looking at you IE). # In that case we want to urllib quote it badly. return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),')) def get_path_info(environ, charset='utf-8', errors='replace'): """Returns the `PATH_INFO` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path info, or `None` if no decoding should be performed. :param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get('PATH_INFO', '')) return to_unicode(path, charset, errors, allow_none_charset=True) def get_script_name(environ, charset='utf-8', errors='replace'): """Returns the `SCRIPT_NAME` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path, or `None` if no decoding should be performed. :param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get('SCRIPT_NAME', '')) return to_unicode(path, charset, errors, allow_none_charset=True) def pop_path_info(environ, charset='utf-8', errors='replace'): """Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` a bytestring is returned. If there are empty segments (``'/foo//bar``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified. """ path = environ.get('PATH_INFO') if not path: return None script_name = environ.get('SCRIPT_NAME', '') # shift multiple leading slashes over old_path = path path = path.lstrip('/') if path != old_path: script_name += '/' * (len(old_path) - len(path)) if '/' not in path: environ['PATH_INFO'] = '' environ['SCRIPT_NAME'] = script_name + path rv = wsgi_get_bytes(path) else: segment, path = path.split('/', 1) environ['PATH_INFO'] = '/' + path environ['SCRIPT_NAME'] = script_name + segment rv = wsgi_get_bytes(segment) return to_unicode(rv, charset, errors, allow_none_charset=True) def peek_path_info(environ, charset='utf-8', errors='replace'): """Returns the next segment on the `PATH_INFO` or `None` if there is none. Works like :func:`pop_path_info` without modifying the environment: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> peek_path_info(env) 'a' >>> peek_path_info(env) 'a' If the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is checked. 
""" segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1) if segments: return to_unicode(wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True) def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8', errors='replace', collapse_http_schemes=True): """Extracts the path info from the given URL (or WSGI environment) and path. The path info returned is a unicode string, not a bytestring suitable for a WSGI environment. The URLs might also be IRIs. If the path info could not be determined, `None` is returned. Some examples: >>> extract_path_info('http://example.com/app', '/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello') u'/hello' >>> extract_path_info('http://example.com/app', ... 'https://example.com/app/hello', ... collapse_http_schemes=False) is None True Instead of providing a base URL you can also pass a WSGI environment. .. versionadded:: 0.6 :param environ_or_baseurl: a WSGI environment dict, a base URL or base IRI. This is the root of the application. :param path_or_url: an absolute path from the server root, a relative path (in which case it's the path info) or a full URL. Also accepts IRIs and unicode parameters. :param charset: the charset for byte data in URLs :param errors: the error handling on decode :param collapse_http_schemes: if set to `False` the algorithm does not assume that http and https on the same server point to the same resource. """ def _normalize_netloc(scheme, netloc): parts = netloc.split(u'@', 1)[-1].split(u':', 1) if len(parts) == 2: netloc, port = parts if (scheme == u'http' and port == u'80') or \ (scheme == u'https' and port == u'443'): port = None else: netloc = parts[0] port = None if port is not None: netloc += u':' + port return netloc # make sure whatever we are working on is a IRI and parse it path = uri_to_iri(path_or_url, charset, errors) if isinstance(environ_or_baseurl, dict): environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True) base_iri = uri_to_iri(environ_or_baseurl, charset, errors) base_scheme, base_netloc, base_path = url_parse(base_iri)[:3] cur_scheme, cur_netloc, cur_path, = \ url_parse(url_join(base_iri, path))[:3] # normalize the network location base_netloc = _normalize_netloc(base_scheme, base_netloc) cur_netloc = _normalize_netloc(cur_scheme, cur_netloc) # is that IRI even on a known HTTP scheme? if collapse_http_schemes: for scheme in base_scheme, cur_scheme: if scheme not in (u'http', u'https'): return None else: if not (base_scheme in (u'http', u'https') and base_scheme == cur_scheme): return None # are the netlocs compatible? if base_netloc != cur_netloc: return None # are we below the application path? base_path = base_path.rstrip(u'/') if not cur_path.startswith(base_path): return None return u'/' + cur_path[len(base_path):].lstrip(u'/') class SharedDataMiddleware(object): """A WSGI middleware that provides static content for development environments or simple server setups. Usage is quite simple:: import os from werkzeug.wsgi import SharedDataMiddleware app = SharedDataMiddleware(app, { '/shared': os.path.join(os.path.dirname(__file__), 'shared') }) The contents of the folder ``./shared`` will now be available on ``http://example.com/shared/``. This is pretty useful during development because a standalone media server is not required. 
One can also mount files on the root folder and still continue to use the application because the shared data middleware forwards all unhandled requests to the application, even if the requests are below one of the shared folders. If `pkg_resources` is available you can also tell the middleware to serve files from package data:: app = SharedDataMiddleware(app, { '/shared': ('myapplication', 'shared_files') }) This will then serve the ``shared_files`` folder in the `myapplication` Python package. The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch` rules for files that are not accessible from the web. If `cache` is set to `False` no caching headers are sent. Currently the middleware does not support non ASCII filenames. If the encoding on the file system happens to be the encoding of the URI it may work but this could also be by accident. We strongly suggest using ASCII only file names for static files. The middleware will guess the mimetype using the Python `mimetype` module. If it's unable to figure out the charset it will fall back to `fallback_mimetype`. .. versionchanged:: 0.5 The cache timeout is configurable now. .. versionadded:: 0.6 The `fallback_mimetype` parameter was added. :param app: the application to wrap. If you don't want to wrap an application you can pass it :exc:`NotFound`. :param exports: a dict of exported files and folders. :param disallow: a list of :func:`~fnmatch.fnmatch` rules. :param fallback_mimetype: the fallback mimetype for unknown files. :param cache: enable or disable caching headers. :param cache_timeout: the cache timeout in seconds for the headers. """ def __init__(self, app, exports, disallow=None, cache=True, cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'): self.app = app self.exports = {} self.cache = cache self.cache_timeout = cache_timeout for key, value in iteritems(exports): if isinstance(value, tuple): loader = self.get_package_loader(*value) elif isinstance(value, string_types): if os.path.isfile(value): loader = self.get_file_loader(value) else: loader = self.get_directory_loader(value) else: raise TypeError('unknown def %r' % value) self.exports[key] = loader if disallow is not None: from fnmatch import fnmatch self.is_allowed = lambda x: not fnmatch(x, disallow) self.fallback_mimetype = fallback_mimetype def is_allowed(self, filename): """Subclasses can override this method to disallow the access to certain files. However by providing `disallow` in the constructor this method is overwritten. 
""" return True def _opener(self, filename): return lambda: ( open(filename, 'rb'), datetime.utcfromtimestamp(os.path.getmtime(filename)), int(os.path.getsize(filename)) ) def get_file_loader(self, filename): return lambda x: (os.path.basename(filename), self._opener(filename)) def get_package_loader(self, package, package_path): from pkg_resources import DefaultProvider, ResourceManager, \ get_provider loadtime = datetime.utcnow() provider = get_provider(package) manager = ResourceManager() filesystem_bound = isinstance(provider, DefaultProvider) def loader(path): if path is None: return None, None path = posixpath.join(package_path, path) if not provider.has_resource(path): return None, None basename = posixpath.basename(path) if filesystem_bound: return basename, self._opener( provider.get_resource_filename(manager, path)) return basename, lambda: ( provider.get_resource_stream(manager, path), loadtime, 0 ) return loader def get_directory_loader(self, directory): def loader(path): if path is not None: path = os.path.join(directory, path) else: path = directory if os.path.isfile(path): return os.path.basename(path), self._opener(path) return None, None return loader def generate_etag(self, mtime, file_size, real_filename): if not isinstance(real_filename, bytes): real_filename = real_filename.encode(sys.getfilesystemencoding()) return 'wzsdm-%d-%s-%s' % ( mktime(mtime.timetuple()), file_size, adler32(real_filename) & 0xffffffff ) def __call__(self, environ, start_response): cleaned_path = get_path_info(environ) if PY2: cleaned_path = cleaned_path.encode(sys.getfilesystemencoding()) # sanitize the path for non unix systems cleaned_path = cleaned_path.strip('/') for sep in os.sep, os.altsep: if sep and sep != '/': cleaned_path = cleaned_path.replace(sep, '/') path = '/' + '/'.join(x for x in cleaned_path.split('/') if x and x != '..') file_loader = None for search_path, loader in iteritems(self.exports): if search_path == path: real_filename, file_loader = loader(None) if file_loader is not None: break if not search_path.endswith('/'): search_path += '/' if path.startswith(search_path): real_filename, file_loader = loader(path[len(search_path):]) if file_loader is not None: break if file_loader is None or not self.is_allowed(real_filename): return self.app(environ, start_response) guessed_type = mimetypes.guess_type(real_filename) mime_type = guessed_type[0] or self.fallback_mimetype f, mtime, file_size = file_loader() headers = [('Date', http_date())] if self.cache: timeout = self.cache_timeout etag = self.generate_etag(mtime, file_size, real_filename) headers += [ ('Etag', '"%s"' % etag), ('Cache-Control', 'max-age=%d, public' % timeout) ] if not is_resource_modified(environ, etag, last_modified=mtime): f.close() start_response('304 Not Modified', headers) return [] headers.append(('Expires', http_date(time() + timeout))) else: headers.append(('Cache-Control', 'public')) headers.extend(( ('Content-Type', mime_type), ('Content-Length', str(file_size)), ('Last-Modified', http_date(mtime)) )) start_response('200 OK', headers) return wrap_file(environ, f) class DispatcherMiddleware(object): """Allows one to mount middlewares or applications in a WSGI application. 
This is useful if you want to combine multiple WSGI applications:: app = DispatcherMiddleware(app, { '/app2': app2, '/app3': app3 }) """ def __init__(self, app, mounts=None): self.app = app self.mounts = mounts or {} def __call__(self, environ, start_response): script = environ.get('PATH_INFO', '') path_info = '' while '/' in script: if script in self.mounts: app = self.mounts[script] break script, last_item = script.rsplit('/', 1) path_info = '/%s%s' % (last_item, path_info) else: app = self.mounts.get(script, self.app) original_script_name = environ.get('SCRIPT_NAME', '') environ['SCRIPT_NAME'] = original_script_name + script environ['PATH_INFO'] = path_info return app(environ, start_response) @implements_iterator class ClosingIterator(object): """The WSGI specification requires that all middlewares and gateways respect the `close` callback of an iterator. Because it is useful to add another close action to a returned iterator and adding a custom iterator is a boring task this class can be used for that:: return ClosingIterator(app(environ, start_response), [cleanup_session, cleanup_locals]) If there is just one close function it can be passed instead of the list. A closing iterator is not needed if the application uses response objects and finishes the processing if the response is started:: try: return response(environ, start_response) finally: cleanup_session() cleanup_locals() """ def __init__(self, iterable, callbacks=None): iterator = iter(iterable) self._next = partial(next, iterator) if callbacks is None: callbacks = [] elif callable(callbacks): callbacks = [callbacks] else: callbacks = list(callbacks) iterable_close = getattr(iterator, 'close', None) if iterable_close: callbacks.insert(0, iterable_close) self._callbacks = callbacks def __iter__(self): return self def __next__(self): return self._next() def close(self): for callback in self._callbacks: callback() def wrap_file(environ, file, buffer_size=8192): """Wraps a file. This uses the WSGI server's file wrapper if available or otherwise the generic :class:`FileWrapper`. .. versionadded:: 0.5 If the file wrapper from the WSGI server is used it's important to not iterate over it from inside the application but to pass it through unchanged. If you want to pass out a file wrapper inside a response object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`. More information about file wrappers are available in :pep:`333`. :param file: a :class:`file`-like object with a :meth:`~file.read` method. :param buffer_size: number of bytes for one iteration. """ return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size) @implements_iterator class FileWrapper(object): """This class can be used to convert a :class:`file`-like object into an iterable. It yields `buffer_size` blocks until the file is fully read. You should not use this class directly but rather use the :func:`wrap_file` function that uses the WSGI server's file wrapper support if it's available. .. versionadded:: 0.5 If you're using this object together with a :class:`BaseResponse` you have to use the `direct_passthrough` mode. :param file: a :class:`file`-like object with a :meth:`~file.read` method. :param buffer_size: number of bytes for one iteration. 
""" def __init__(self, file, buffer_size=8192): self.file = file self.buffer_size = buffer_size def close(self): if hasattr(self.file, 'close'): self.file.close() def __iter__(self): return self def __next__(self): data = self.file.read(self.buffer_size) if data: return data raise StopIteration() def _make_chunk_iter(stream, limit, buffer_size): """Helper for the line and chunk iter functions.""" if isinstance(stream, (bytes, bytearray, text_type)): raise TypeError('Passed a string or byte object instead of ' 'true iterator or stream.') if not hasattr(stream, 'read'): for item in stream: if item: yield item return if not isinstance(stream, LimitedStream) and limit is not None: stream = LimitedStream(stream, limit) _read = stream.read while 1: item = _read(buffer_size) if not item: break yield item def make_line_iter(stream, limit=None, buffer_size=10 * 1024): """Safely iterates line-based over an input stream. If the input stream is not a :class:`LimitedStream` the `limit` parameter is mandatory. This uses the stream's :meth:`~file.read` method internally as opposite to the :meth:`~file.readline` method that is unsafe and can only be used in violation of the WSGI specification. The same problem applies to the `__iter__` function of the input stream which calls :meth:`~file.readline` without arguments. If you need line-by-line processing it's strongly recommended to iterate over the input stream using this helper function. .. versionchanged:: 0.8 This function now ensures that the limit was reached. .. versionadded:: 0.9 added support for iterators as input stream. :param stream: the stream or iterate to iterate over. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is a :class:`LimitedStream`. :param buffer_size: The optional buffer size. """ _iter = _make_chunk_iter(stream, limit, buffer_size) first_item = next(_iter, '') if not first_item: return s = make_literal_wrapper(first_item) empty = s('') cr = s('\r') lf = s('\n') crlf = s('\r\n') _iter = chain((first_item,), _iter) def _iter_basic_lines(): _join = empty.join buffer = [] while 1: new_data = next(_iter, '') if not new_data: break new_buf = [] for item in chain(buffer, new_data.splitlines(True)): new_buf.append(item) if item and item[-1:] in crlf: yield _join(new_buf) new_buf = [] buffer = new_buf if buffer: yield _join(buffer) # This hackery is necessary to merge 'foo\r' and '\n' into one item # of 'foo\r\n' if we were unlucky and we hit a chunk boundary. previous = empty for item in _iter_basic_lines(): if item == lf and previous[-1:] == cr: previous += item item = empty if previous: yield previous previous = item if previous: yield previous def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024): """Works like :func:`make_line_iter` but accepts a separator which divides chunks. If you want newline based processing you should use :func:`make_line_iter` instead as it supports arbitrary newline markers. .. versionadded:: 0.8 .. versionadded:: 0.9 added support for iterators as input stream. :param stream: the stream or iterate to iterate over. :param separator: the separator that divides chunks. :param limit: the limit in bytes for the stream. (Usually content length. Not necessary if the `stream` is otherwise already limited). :param buffer_size: The optional buffer size. 
""" _iter = _make_chunk_iter(stream, limit, buffer_size) first_item = next(_iter, '') if not first_item: return _iter = chain((first_item,), _iter) if isinstance(first_item, text_type): separator = to_unicode(separator) _split = re.compile(r'(%s)' % re.escape(separator)).split _join = u''.join else: separator = to_bytes(separator) _split = re.compile(b'(' + re.escape(separator) + b')').split _join = b''.join buffer = [] while 1: new_data = next(_iter, '') if not new_data: break chunks = _split(new_data) new_buf = [] for item in chain(buffer, chunks): if item == separator: yield _join(new_buf) new_buf = [] else: new_buf.append(item) buffer = new_buf if buffer: yield _join(buffer) @implements_iterator class LimitedStream(object): """Wraps a stream so that it doesn't read more than n bytes. If the stream is exhausted and the caller tries to get more bytes from it :func:`on_exhausted` is called which by default returns an empty string. The return value of that function is forwarded to the reader function. So if it returns an empty string :meth:`read` will return an empty string as well. The limit however must never be higher than what the stream can output. Otherwise :meth:`readlines` will try to read past the limit. .. admonition:: Note on WSGI compliance calls to :meth:`readline` and :meth:`readlines` are not WSGI compliant because it passes a size argument to the readline methods. Unfortunately the WSGI PEP is not safely implementable without a size argument to :meth:`readline` because there is no EOF marker in the stream. As a result of that the use of :meth:`readline` is discouraged. For the same reason iterating over the :class:`LimitedStream` is not portable. It internally calls :meth:`readline`. We strongly suggest using :meth:`read` only or using the :func:`make_line_iter` which safely iterates line-based over a WSGI input stream. :param stream: the stream to wrap. :param limit: the limit for the stream, must not be longer than what the string can provide if the stream does not end with `EOF` (like `wsgi.input`) """ def __init__(self, stream, limit): self._read = stream.read self._readline = stream.readline self._pos = 0 self.limit = limit def __iter__(self): return self @property def is_exhausted(self): """If the stream is exhausted this attribute is `True`.""" return self._pos >= self.limit def on_exhausted(self): """This is called when the stream tries to read past the limit. The return value of this function is returned from the reading function. """ # Read null bytes from the stream so that we get the # correct end of stream marker. return self._read(0) def on_disconnect(self): """What should happen if a disconnect is detected? The return value of this function is returned from read functions in case the client went away. By default a :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised. """ from werkzeug.exceptions import ClientDisconnected raise ClientDisconnected() def exhaust(self, chunk_size=1024 * 64): """Exhaust the stream. This consumes all the data left until the limit is reached. :param chunk_size: the size for a chunk. It will read the chunk until the stream is exhausted and throw away the results. """ to_read = self.limit - self._pos chunk = chunk_size while to_read > 0: chunk = min(to_read, chunk) self.read(chunk) to_read -= chunk def read(self, size=None): """Read `size` bytes or if size is not provided everything is read. :param size: the number of bytes read. 
""" if self._pos >= self.limit: return self.on_exhausted() if size is None or size == -1: # -1 is for consistence with file size = self.limit to_read = min(self.limit - self._pos, size) try: read = self._read(to_read) except (IOError, ValueError): return self.on_disconnect() if to_read and len(read) != to_read: return self.on_disconnect() self._pos += len(read) return read def readline(self, size=None): """Reads one line from the stream.""" if self._pos >= self.limit: return self.on_exhausted() if size is None: size = self.limit - self._pos else: size = min(size, self.limit - self._pos) try: line = self._readline(size) except (ValueError, IOError): return self.on_disconnect() if size and not line: return self.on_disconnect() self._pos += len(line) return line def readlines(self, size=None): """Reads a file into a list of strings. It calls :meth:`readline` until the file is read to the end. It does support the optional `size` argument if the underlaying stream supports it for `readline`. """ last_pos = self._pos result = [] if size is not None: end = min(self.limit, last_pos + size) else: end = self.limit while 1: if size is not None: size -= last_pos - self._pos if self._pos >= end: break result.append(self.readline(size)) if size is not None: last_pos = self._pos return result def tell(self): """Returns the position of the stream. .. versionadded:: 0.9 """ return self._pos def __next__(self): line = self.readline() if not line: raise StopIteration() return line
mit
1,368,267,649,332,455,200
35.06959
81
0.606206
false
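The docstrings in the wsgi helpers above describe ClosingIterator, LimitedStream and make_line_iter in isolation; the short sketch below ties the last two together. It assumes werkzeug is importable as packaged above, and the BytesIO payload plus the content length are invented purely for illustration.

from io import BytesIO

from werkzeug.wsgi import LimitedStream, make_line_iter

# Pretend this is wsgi.input plus the CONTENT_LENGTH header of a request.
raw = BytesIO(b'first line\nsecond line TRAILING GARBAGE')
content_length = 22

# LimitedStream stops reads at the declared length; make_line_iter then
# yields safe line-by-line access without ever calling readline() directly.
stream = LimitedStream(raw, content_length)
for line in make_line_iter(stream, limit=content_length):
    print(line)  # b'first line\n', then b'second line'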
NL66278/odoo
addons/hr_holidays/tests/__init__.py
121
1159
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.addons.hr_holidays.tests import test_holidays_flow checks = [ test_holidays_flow, ] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
6,080,726,473,760,443,000
40.392857
78
0.626402
false
tchellomello/home-assistant
homeassistant/components/xiaomi_tv/media_player.py
21
3130
"""Add support for the Xiaomi TVs.""" import logging import pymitv import voluptuous as vol from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity from homeassistant.components.media_player.const import ( SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_STEP, ) from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON import homeassistant.helpers.config_validation as cv DEFAULT_NAME = "Xiaomi TV" _LOGGER = logging.getLogger(__name__) SUPPORT_XIAOMI_TV = SUPPORT_VOLUME_STEP | SUPPORT_TURN_ON | SUPPORT_TURN_OFF # No host is needed for configuration, however it can be set. PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Xiaomi TV platform.""" # If a hostname is set. Discovery is skipped. host = config.get(CONF_HOST) name = config.get(CONF_NAME) if host is not None: # Check if there's a valid TV at the IP address. if not pymitv.Discover().check_ip(host): _LOGGER.error("Could not find Xiaomi TV with specified IP: %s", host) else: # Register TV with Home Assistant. add_entities([XiaomiTV(host, name)]) else: # Otherwise, discover TVs on network. add_entities(XiaomiTV(tv, DEFAULT_NAME) for tv in pymitv.Discover().scan()) class XiaomiTV(MediaPlayerEntity): """Represent the Xiaomi TV for Home Assistant.""" def __init__(self, ip, name): """Receive IP address and name to construct class.""" # Initialize the Xiaomi TV. self._tv = pymitv.TV(ip) # Default name value, only to be overridden by user. self._name = name self._state = STATE_OFF @property def name(self): """Return the display name of this TV.""" return self._name @property def state(self): """Return _state variable, containing the appropriate constant.""" return self._state @property def assumed_state(self): """Indicate that state is assumed.""" return True @property def supported_features(self): """Flag media player features that are supported.""" return SUPPORT_XIAOMI_TV def turn_off(self): """ Instruct the TV to turn sleep. This is done instead of turning off, because the TV won't accept any input when turned off. Thus, the user would be unable to turn the TV back on, unless it's done manually. """ if self._state != STATE_OFF: self._tv.sleep() self._state = STATE_OFF def turn_on(self): """Wake the TV back up from sleep.""" if self._state != STATE_ON: self._tv.wake() self._state = STATE_ON def volume_up(self): """Increase volume by one.""" self._tv.volume_up() def volume_down(self): """Decrease volume by one.""" self._tv.volume_down()
apache-2.0
1,234,339,652,272,674,000
27.981481
84
0.627476
false
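The platform schema above leaves both host and name optional, which is what lets setup_platform fall back to pymitv discovery when no host is given. Below is a small standalone sketch of that optional-key pattern using voluptuous directly; it merely stands in for Home Assistant's PLATFORM_SCHEMA (which additionally requires a platform key), uses plain str instead of cv.string for brevity, and the address is invented.

import voluptuous as vol

DEFAULT_NAME = 'Xiaomi TV'

schema = vol.Schema({
    vol.Optional('host'): str,
    vol.Optional('name', default=DEFAULT_NAME): str,
})

# No host: validation still passes and the platform would scan the network.
print(schema({}))                        # {'name': 'Xiaomi TV'}
# Explicit host: discovery is skipped and the TV at this address is used.
print(schema({'host': '192.168.0.15'}))  # {'host': '192.168.0.15', 'name': 'Xiaomi TV'}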
privateip/ansible
lib/ansible/playbook/role/__init__.py
14
18852
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import collections import os from ansible.compat.six import iteritems, binary_type, text_type from ansible.errors import AnsibleError, AnsibleParserError from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role.metadata import RoleMetadata from ansible.playbook.taggable import Taggable from ansible.plugins import get_all_plugin_loaders from ansible.utils.vars import combine_vars __all__ = ['Role', 'hash_params'] # TODO: this should be a utility function, but can't be a member of # the role due to the fact that it would require the use of self # in a static method. This is also used in the base class for # strategies (ansible/plugins/strategy/__init__.py) def hash_params(params): """ Construct a data structure of parameters that is hashable. This requires changing any mutable data structures into immutable ones. We chose a frozenset because role parameters have to be unique. .. warning:: this does not handle unhashable scalars. Two things mitigate that limitation: 1) There shouldn't be any unhashable scalars specified in the yaml 2) Our only choice would be to return an error anyway. """ # Any container is unhashable if it contains unhashable items (for # instance, tuple() is a Hashable subclass but if it contains a dict, it # cannot be hashed) if isinstance(params, collections.Container) and not isinstance(params, (text_type, binary_type)): if isinstance(params, collections.Mapping): try: # Optimistically hope the contents are all hashable new_params = frozenset(params.items()) except TypeError: new_params = set() for k, v in params.items(): # Hash each entry individually new_params.update((k, hash_params(v))) new_params = frozenset(new_params) elif isinstance(params, (collections.Set, collections.Sequence)): try: # Optimistically hope the contents are all hashable new_params = frozenset(params) except TypeError: new_params = set() for v in params: # Hash each entry individually new_params.update(hash_params(v)) new_params = frozenset(new_params) else: # This is just a guess. new_params = frozenset(params) return new_params # Note: We do not handle unhashable scalars but our only choice would be # to raise an error there anyway. 
return frozenset((params,)) class Role(Base, Become, Conditional, Taggable): _delegate_to = FieldAttribute(isa='string') _delegate_facts = FieldAttribute(isa='bool', default=False) def __init__(self, play=None, from_files=None): self._role_name = None self._role_path = None self._role_params = dict() self._loader = None self._metadata = None self._play = play self._parents = [] self._dependencies = [] self._task_blocks = [] self._handler_blocks = [] self._default_vars = dict() self._role_vars = dict() self._had_task_run = dict() self._completed = dict() if from_files is None: from_files = {} self._from_files = from_files super(Role, self).__init__() def __repr__(self): return self.get_name() def get_name(self): return self._role_name @staticmethod def load(role_include, play, parent_role=None, from_files=None): if from_files is None: from_files = {} try: # The ROLE_CACHE is a dictionary of role names, with each entry # containing another dictionary corresponding to a set of parameters # specified for a role as the key and the Role() object itself. # We use frozenset to make the dictionary hashable. params = role_include.get_role_params() if role_include.when is not None: params['when'] = role_include.when if role_include.tags is not None: params['tags'] = role_include.tags if from_files is not None: params['from_files'] = from_files if role_include.vars: params['vars'] = role_include.vars hashed_params = hash_params(params) if role_include.role in play.ROLE_CACHE: for (entry, role_obj) in iteritems(play.ROLE_CACHE[role_include.role]): if hashed_params == entry: if parent_role: role_obj.add_parent(parent_role) return role_obj r = Role(play=play, from_files=from_files) r._load_role_data(role_include, parent_role=parent_role) if role_include.role not in play.ROLE_CACHE: play.ROLE_CACHE[role_include.role] = dict() play.ROLE_CACHE[role_include.role][hashed_params] = r return r except RuntimeError: raise AnsibleError("A recursion loop was detected with the roles specified. 
Make sure child roles do not have dependencies on parent roles", obj=role_include._ds) def _load_role_data(self, role_include, parent_role=None): self._role_name = role_include.role self._role_path = role_include.get_role_path() self._role_params = role_include.get_role_params() self._variable_manager = role_include.get_variable_manager() self._loader = role_include.get_loader() if parent_role: self.add_parent(parent_role) # copy over all field attributes, except for when and tags, which # are special cases and need to preserve pre-existing values for (attr_name, _) in iteritems(self._valid_attrs): if attr_name not in ('when', 'tags'): setattr(self, attr_name, getattr(role_include, attr_name)) current_when = getattr(self, 'when')[:] current_when.extend(role_include.when) setattr(self, 'when', current_when) current_tags = getattr(self, 'tags')[:] current_tags.extend(role_include.tags) setattr(self, 'tags', current_tags) # dynamically load any plugins from the role directory for name, obj in get_all_plugin_loaders(): if obj.subdir: plugin_path = os.path.join(self._role_path, obj.subdir) if os.path.isdir(plugin_path): obj.add_directory(plugin_path) # load the role's other files, if they exist metadata = self._load_role_yaml('meta') if metadata: self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader) self._dependencies = self._load_dependencies() else: self._metadata = RoleMetadata() task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks')) if task_data: try: self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager) except AssertionError: raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data) handler_data = self._load_role_yaml('handlers') if handler_data: try: self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader, variable_manager=self._variable_manager) except AssertionError: raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=handler_data) # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars')) if self._role_vars is None: self._role_vars = dict() elif not isinstance(self._role_vars, dict): raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name) self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults')) if self._default_vars is None: self._default_vars = dict() elif not isinstance(self._default_vars, dict): raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name) def _load_role_yaml(self, subdir, main=None): file_path = os.path.join(self._role_path, subdir) if self._loader.path_exists(file_path) and self._loader.is_directory(file_path): main_file = self._resolve_main(file_path, main) if self._loader.path_exists(main_file): return self._loader.load_from_file(main_file) elif main is not None: raise AnsibleParserError("Could not find specified file in role: %s/%s" % (subdir,main)) return None def _resolve_main(self, basepath, main=None): ''' flexibly handle variations in main filenames ''' post = False # allow override if set, otherwise use default if main is None: main = 'main' post 
= True bare_main = os.path.join(basepath, main) possible_mains = ( os.path.join(basepath, '%s.yml' % main), os.path.join(basepath, '%s.yaml' % main), os.path.join(basepath, '%s.json' % main), ) if post: possible_mains = possible_mains + (bare_main,) else: possible_mains = (bare_main,) + possible_mains if sum([self._loader.is_file(x) for x in possible_mains]) > 1: raise AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) else: for m in possible_mains: if self._loader.is_file(m): return m # exactly one main file return possible_mains[0] # zero mains (we still need to return something) def _load_dependencies(self): ''' Recursively loads role dependencies from the metadata list of dependencies, if it exists ''' deps = [] if self._metadata: for role_include in self._metadata.dependencies: r = Role.load(role_include, play=self._play, parent_role=self) deps.append(r) return deps #------------------------------------------------------------------------------ # other functions def add_parent(self, parent_role): ''' adds a role to the list of this roles parents ''' assert isinstance(parent_role, Role) if parent_role not in self._parents: self._parents.append(parent_role) def get_parents(self): return self._parents def get_default_vars(self, dep_chain=[]): default_vars = dict() for dep in self.get_all_dependencies(): default_vars = combine_vars(default_vars, dep.get_default_vars()) if dep_chain: for parent in dep_chain: default_vars = combine_vars(default_vars, parent._default_vars) default_vars = combine_vars(default_vars, self._default_vars) return default_vars def get_inherited_vars(self, dep_chain=[]): inherited_vars = dict() if dep_chain: for parent in dep_chain: inherited_vars = combine_vars(inherited_vars, parent._role_vars) return inherited_vars def get_role_params(self, dep_chain=[]): params = {} if dep_chain: for parent in dep_chain: params = combine_vars(params, parent._role_params) params = combine_vars(params, self._role_params) return params def get_vars(self, dep_chain=[], include_params=True): all_vars = self.get_inherited_vars(dep_chain) for dep in self.get_all_dependencies(): all_vars = combine_vars(all_vars, dep.get_vars(include_params=include_params)) all_vars = combine_vars(all_vars, self.vars) all_vars = combine_vars(all_vars, self._role_vars) if include_params: all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain)) return all_vars def get_direct_dependencies(self): return self._dependencies[:] def get_all_dependencies(self): ''' Returns a list of all deps, built recursively from all child dependencies, in the proper order in which they should be executed or evaluated. 
''' child_deps = [] for dep in self.get_direct_dependencies(): for child_dep in dep.get_all_dependencies(): child_deps.append(child_dep) child_deps.append(dep) return child_deps def get_task_blocks(self): return self._task_blocks[:] def get_handler_blocks(self, play, dep_chain=None): block_list = [] # update the dependency chain here if dep_chain is None: dep_chain = [] new_dep_chain = dep_chain + [self] for dep in self.get_direct_dependencies(): dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain) block_list.extend(dep_blocks) for task_block in self._handler_blocks: new_task_block = task_block.copy() new_task_block._dep_chain = new_dep_chain new_task_block._play = play block_list.append(new_task_block) return block_list def has_run(self, host): ''' Returns true if this role has been iterated over completely and at least one task was run ''' return host.name in self._completed and not self._metadata.allow_duplicates def compile(self, play, dep_chain=None): ''' Returns the task list for this role, which is created by first recursively compiling the tasks for all direct dependencies, and then adding on the tasks for this role. The role compile() also remembers and saves the dependency chain with each task, so tasks know by which route they were found, and can correctly take their parent's tags/conditionals into account. ''' block_list = [] # update the dependency chain here if dep_chain is None: dep_chain = [] new_dep_chain = dep_chain + [self] deps = self.get_direct_dependencies() for dep in deps: dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain) block_list.extend(dep_blocks) for task_block in self._task_blocks: new_task_block = task_block.copy(exclude_parent=True) if task_block._parent: new_task_block._parent = task_block._parent.copy() new_task_block._dep_chain = new_dep_chain new_task_block._play = play block_list.append(new_task_block) return block_list def serialize(self, include_deps=True): res = super(Role, self).serialize() res['_role_name'] = self._role_name res['_role_path'] = self._role_path res['_role_vars'] = self._role_vars res['_role_params'] = self._role_params res['_default_vars'] = self._default_vars res['_had_task_run'] = self._had_task_run.copy() res['_completed'] = self._completed.copy() if self._metadata: res['_metadata'] = self._metadata.serialize() if include_deps: deps = [] for role in self.get_direct_dependencies(): deps.append(role.serialize()) res['_dependencies'] = deps parents = [] for parent in self._parents: parents.append(parent.serialize(include_deps=False)) res['_parents'] = parents return res def deserialize(self, data, include_deps=True): self._role_name = data.get('_role_name', '') self._role_path = data.get('_role_path', '') self._role_vars = data.get('_role_vars', dict()) self._role_params = data.get('_role_params', dict()) self._default_vars = data.get('_default_vars', dict()) self._had_task_run = data.get('_had_task_run', dict()) self._completed = data.get('_completed', dict()) if include_deps: deps = [] for dep in data.get('_dependencies', []): r = Role() r.deserialize(dep) deps.append(r) setattr(self, '_dependencies', deps) parent_data = data.get('_parents', []) parents = [] for parent in parent_data: r = Role() r.deserialize(parent, include_deps=False) parents.append(r) setattr(self, '_parents', parents) metadata_data = data.get('_metadata') if metadata_data: m = RoleMetadata() m.deserialize(metadata_data) self._metadata = m super(Role, self).deserialize(data) def set_loader(self, loader): self._loader = loader for 
parent in self._parents: parent.set_loader(loader) for dep in self.get_direct_dependencies(): dep.set_loader(loader)
gpl-3.0
-1,690,207,452,564,387,300
37.710472
181
0.596435
false
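hash_params above exists so that a role together with its parameters can be used as a key into play.ROLE_CACHE. The sketch below, which assumes an ansible checkout of this vintage is importable and uses invented parameter values, shows that two equal but differently ordered parameter mappings collapse to the same hashable frozenset and therefore hit the same cache entry.

from ansible.playbook.role import hash_params

a = hash_params({'when': ['x == 1'], 'vars': {'port': 8080}})
b = hash_params({'vars': {'port': 8080}, 'when': ['x == 1']})

cache = {a: 'cached Role() instance would live here'}
print(a == b)      # True - ordering of the mapping does not matter
print(b in cache)  # True - so Role.load() can reuse the cached object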
sppps/coffee-cup-accounting
consume/__init__.py
1
2884
import forms from flask import current_app as app from generic.editor import GenericEditor from bson import ObjectId class ConsumeEditor(GenericEditor): def __init__(self, *args, **kwargs): super(ConsumeEditor, self).__init__(*args, **kwargs) def _create_form(self, *args, **kwargs): return forms.ConsumeForm(*args, **kwargs) def _on_before_item_create(self, item): db = app.config['db'] tech_map = db.techmaps.find_one(item['techmap_id']) feed_count = len(item['consumers']) supplies = [] for tm_ingredient in tech_map['ingredients']: ingredient = db.ingredients.find_one(tm_ingredient['ingredient_id']) if tm_ingredient['per_feed']: required_amount = feed_count*tm_ingredient['amount'] else: required_amount = tm_ingredient['amount'] query = { 'ingredient_id': tm_ingredient['ingredient_id'], 'current_amount': {'$gt': 0} } for supply in db.supply.find(query).sort([['datetime', 1]]): price_per_unit = supply['price']/supply['supply_amount'] if supply['current_amount'] >= required_amount: supplies.append({ 'ingredient_name': ingredient['name'], 'supply_id': supply['_id'], 'amount': required_amount, 'price_per_unit': price_per_unit }) required_amount = 0 else: supplies.append({ 'ingredient_name': ingredient['name'], 'ingredient_units': ingredient['units'], 'supply_id': supply['_id'], 'amount': supply['current_amount'], 'price_per_unit': price_per_unit }) required_amount -= supply['current_amount'] if required_amount <= 0.0: break if required_amount > 0.0: raise Exception(u'Non-zero supply not found for %s' % ingredient['name']) item['total'] = sum([s['amount']*s['price_per_unit'] for s in supplies]) item['supplies'] = supplies for supply in supplies: db.supply.update({ '_id': supply['supply_id'] }, { '$inc': {'current_amount': -supply['amount']} }) for consumer in item['consumers']: db.consumers.update({ '_id': consumer['consumer_id'] }, { '$inc': { 'debt': item['total']/feed_count } }) def create_blueprint(): return ConsumeEditor('consume', __name__).blueprint
gpl-3.0
233,291,850,835,643,900
37.972973
89
0.48301
false
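_on_before_item_create above drains supplies oldest-first and prices every slice at that supply's own unit cost before splitting the total across consumers. The following database-free sketch reproduces just that draw-down loop; the helper name and the supply figures are invented for illustration and are not part of the module above.

# Pull the required amount from the oldest supplies first, costing each slice
# at that supply's own price per unit (mirrors the FIFO loop in the editor).
def draw_down(required_amount, supplies):
    """supplies: list of dicts sorted by purchase date, oldest first."""
    slices = []
    for supply in supplies:
        if required_amount <= 0:
            break
        if supply['current_amount'] <= 0:
            continue
        take = min(required_amount, supply['current_amount'])
        price_per_unit = supply['price'] / supply['supply_amount']
        slices.append({'amount': take, 'price_per_unit': price_per_unit})
        supply['current_amount'] -= take
        required_amount -= take
    if required_amount > 0:
        raise Exception('Non-zero supply not found')
    return slices

supplies = [
    {'supply_amount': 500.0, 'current_amount': 120.0, 'price': 10.0},  # older, 0.02/unit
    {'supply_amount': 500.0, 'current_amount': 500.0, 'price': 15.0},  # newer, 0.03/unit
]
slices = draw_down(200.0, supplies)
total = sum(s['amount'] * s['price_per_unit'] for s in slices)
print(total)  # 120*0.02 + 80*0.03 = 4.8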
broferek/ansible
lib/ansible/modules/network/fortios/fortios_system_auto_install.py
13
9641
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_system_auto_install short_description: Configure USB auto installation in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify system feature and auto_install category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.9" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true system_auto_install: description: - Configure USB auto installation. default: null type: dict suboptions: auto_install_config: description: - Enable/disable auto install the config in USB disk. type: str choices: - enable - disable auto_install_image: description: - Enable/disable auto install the image in USB disk. type: str choices: - enable - disable default_config_file: description: - Default config file name in USB disk. type: str default_image_file: description: - Default image file name in USB disk. type: str ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure USB auto installation. 
fortios_system_auto_install: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" system_auto_install: auto_install_config: "enable" auto_install_image: "enable" default_config_file: "<your_own_value>" default_image_file: "<your_own_value>" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_system_auto_install_data(json): option_list = ['auto_install_config', 'auto_install_image', 'default_config_file', 'default_image_file'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def system_auto_install(data, fos): vdom = data['vdom'] system_auto_install_data = data['system_auto_install'] filtered_data = underscore_to_hyphen(filter_system_auto_install_data(system_auto_install_data)) return fos.set('system', 'auto-install', data=filtered_data, vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_system(data, fos): if data['system_auto_install']: resp = system_auto_install(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", 
"default": True}, "system_auto_install": { "required": False, "type": "dict", "default": None, "options": { "auto_install_config": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "auto_install_image": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "default_config_file": {"required": False, "type": "str"}, "default_image_file": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_system(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_system(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
gpl-3.0
6,889,538,730,088,696,000
29.509494
99
0.605124
false
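Before the payload reaches fos.set(), the module filters out unset options and rewrites snake_case keys into the hyphenated names the FortiOS API expects. The snippet below shows the two steps on a sample parameter dict; the values are invented and it assumes it is run with the two helpers above already in scope (for example pasted beneath the module's function definitions).

params = {
    'auto_install_config': 'enable',
    'auto_install_image': None,          # unset -> filtered out
    'default_config_file': 'fgt.conf',
    'default_image_file': None,          # unset -> filtered out
}
filtered = filter_system_auto_install_data(params)
payload = underscore_to_hyphen(filtered)
print(payload)
# {'auto-install-config': 'enable', 'default-config-file': 'fgt.conf'}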
Frederick888/cargo
src/etc/install-deps.py
17
2275
#!/usr/bin/env python

import contextlib
import download
import os
import shutil
import sys
import tarfile

if os.environ.get('BITS') == '32':
    host_bits = 'i686'
    extra_bits = 'x86_64'
else:
    host_bits = 'x86_64'
    extra_bits = 'i686'

extra = None
libdir = 'lib'

# Figure out our target triple
if sys.platform == 'linux' or sys.platform == 'linux2':
    host = host_bits + '-unknown-linux-gnu'
    extra = extra_bits + '-unknown-linux-gnu'
elif sys.platform == 'darwin':
    host = host_bits + '-apple-darwin'
    extra = extra_bits + '-apple-darwin'
elif sys.platform == 'win32':
    libdir = 'bin'
    if os.environ.get('MSVC') == '1':
        host = host_bits + '-pc-windows-msvc'
        extra = extra_bits + '-pc-windows-msvc'
    else:
        host = host_bits + '-pc-windows-gnu'
else:
    raise Exception("Unknown platform")

rust_date = open('src/rustversion.txt').read().strip()
url = 'https://static.rust-lang.org/dist/' + rust_date


def install_via_tarballs():
    if os.path.isdir("rustc-install"):
        shutil.rmtree("rustc-install")

    host_fname = 'rustc-nightly-' + host + '.tar.gz'
    download.get(url + '/' + host_fname, host_fname)
    download.unpack(host_fname, "rustc-install", quiet=True)
    os.remove(host_fname)

    if extra is not None:
        extra_fname = 'rustc-nightly-' + extra + '.tar.gz'
        print("adding target libs for " + extra)
        download.get(url + '/' + extra_fname, extra_fname)
        folder = extra_fname.replace(".tar.gz", "")
        with contextlib.closing(tarfile.open(extra_fname)) as tar:
            for p in tar.getnames():
                if not "rustc/" + libdir + "/rustlib/" + extra in p:
                    continue
                name = p.replace(folder + "/", "", 1)
                dst = "rustc-install/" + name
                tar.extract(p, "rustc-install")
                tp = os.path.join("rustc-install", p)
                if os.path.isdir(tp) and os.path.exists(dst):
                    continue
                shutil.move(tp, dst)
        shutil.rmtree("rustc-install/" + folder)
        os.remove(extra_fname)

    if os.path.isdir("rustc"):
        shutil.rmtree("rustc")
    os.rename("rustc-install/rustc", "rustc")
    shutil.rmtree("rustc-install")

install_via_tarballs()
apache-2.0
-1,974,201,443,799,018,500
29.743243
68
0.582857
false
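The triple selection at the top of the script is the part most worth eyeballing: BITS picks the host architecture, the platform picks the vendor/OS suffix, and win32 GNU builds install no extra target at all. Below is a condensed restatement of that mapping as a pure function; the function name and the example calls are invented for illustration.

def triples(platform, bits, msvc=False):
    host_bits = 'i686' if bits == '32' else 'x86_64'
    extra_bits = 'x86_64' if bits == '32' else 'i686'
    if platform.startswith('linux'):
        suffix = '-unknown-linux-gnu'
    elif platform == 'darwin':
        suffix = '-apple-darwin'
    elif platform == 'win32':
        suffix = '-pc-windows-msvc' if msvc else '-pc-windows-gnu'
    else:
        raise Exception('Unknown platform: %s' % platform)
    # win32 GNU builds get no extra target in the original script.
    extra = None if (platform == 'win32' and not msvc) else extra_bits + suffix
    return host_bits + suffix, extra

print(triples('linux', '64'))             # ('x86_64-unknown-linux-gnu', 'i686-unknown-linux-gnu')
print(triples('win32', '32', msvc=True))  # ('i686-pc-windows-msvc', 'x86_64-pc-windows-msvc')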
mfherbst/spack
var/spack/repos/builtin/packages/deconseq-standalone/package.py
5
2521
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class DeconseqStandalone(Package): """The DeconSeq tool can be used to automatically detect and efficiently remove sequence contaminations from genomic and metagenomic datasets.""" homepage = "http://deconseq.sourceforge.net" url = "https://sourceforge.net/projects/deconseq/files/standalone/deconseq-standalone-0.4.3.tar.gz" version('0.4.3', 'cb3fddb90e584d89fd9c2b6b8f2e20a2') depends_on('perl@5:') def install(self, spec, prefix): filter_file(r'#!/usr/bin/perl', '#!/usr/bin/env perl', 'deconseq.pl') filter_file(r'#!/usr/bin/perl', '#!/usr/bin/env perl', 'splitFasta.pl') mkdirp(prefix.bin) install('bwa64', prefix.bin) install('bwaMAC', prefix.bin) install('deconseq.pl', prefix.bin) install('splitFasta.pl', prefix.bin) install('DeconSeqConfig.pm', prefix) chmod = which('chmod') chmod('+x', join_path(prefix.bin, 'bwa64')) chmod('+x', join_path(prefix.bin, 'bwaMAC')) chmod('+x', join_path(prefix.bin, 'deconseq.pl')) chmod('+x', join_path(prefix.bin, 'splitFasta.pl')) def setup_environment(self, spack_env, run_env): run_env.prepend_path('PERL5LIB', prefix)
lgpl-2.1
-3,389,374,328,395,453,400
41.016667
108
0.645379
false
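The two filter_file calls in the package rewrite the hard-coded #!/usr/bin/perl shebang to #!/usr/bin/env perl so the installed scripts resolve perl from the environment Spack sets up. A rough stand-in for what that substitution does, using a throwaway file; the names are invented and this is not Spack's actual filter_file implementation.

import re

def rewrite_shebang(path):
    # Swap a hard-coded perl shebang for an env-based one, in place.
    with open(path) as fh:
        text = fh.read()
    text = re.sub(r'\A#!/usr/bin/perl', '#!/usr/bin/env perl', text, count=1)
    with open(path, 'w') as fh:
        fh.write(text)

with open('deconseq.pl', 'w') as fh:  # throwaway example script
    fh.write('#!/usr/bin/perl\nprint "hello\\n";\n')
rewrite_shebang('deconseq.pl')
print(open('deconseq.pl').readline())  # '#!/usr/bin/env perl\n'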
squidsoup/snapcraft
snapcraft/file_utils.py
3
7787
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from contextlib import contextmanager
import os
import shutil
import subprocess
import logging

from snapcraft.internal.errors import (
    RequiredCommandFailure,
    RequiredCommandNotFound,
    RequiredPathDoesNotExist,
)

logger = logging.getLogger(__name__)


def replace_in_file(directory, file_pattern, search_pattern, replacement):
    """Searches and replaces patterns that match a file pattern.

    :param str directory: The directory to look for files.
    :param str file_pattern: The file pattern to match inside directory.
    :param search_pattern: A re.compile'd pattern to search for within
                           matching files.
    :param str replacement: The string to replace the matching search_pattern
                            with.
    """
    for root, directories, files in os.walk(directory):
        for file_name in files:
            if file_pattern.match(file_name):
                _search_and_replace_contents(os.path.join(root, file_name),
                                             search_pattern, replacement)


def link_or_copy(source, destination, follow_symlinks=False):
    """Hard-link source and destination files. Copy if it fails to link.

    Hard-linking may fail (e.g. a cross-device link, or permission denied),
    so as a backup plan we just copy it.

    :param str source: The source to which destination will be linked.
    :param str destination: The destination to be linked to source.
    :param bool follow_symlinks: Whether or not symlinks should be followed.
    """
    try:
        # Note that follow_symlinks doesn't seem to work for os.link, so we'll
        # implement this logic ourselves using realpath.
        source_path = source
        if follow_symlinks:
            source_path = os.path.realpath(source)

        if not os.path.exists(os.path.dirname(destination)):
            create_similar_directory(
                os.path.dirname(source_path),
                os.path.dirname(destination))
        # Setting follow_symlinks=False in case this bug is ever fixed
        # upstream-- we want this function to continue supporting NOT
        # following symlinks.
        os.link(source_path, destination, follow_symlinks=False)
    except OSError:
        shutil.copy2(source, destination, follow_symlinks=follow_symlinks)
        uid = os.stat(source, follow_symlinks=follow_symlinks).st_uid
        gid = os.stat(source, follow_symlinks=follow_symlinks).st_gid
        try:
            os.chown(destination, uid, gid, follow_symlinks=follow_symlinks)
        except PermissionError as e:
            logger.debug('Unable to chown {destination}: {error}'.format(
                destination=destination, error=e))


def link_or_copy_tree(source_tree, destination_tree,
                      copy_function=link_or_copy):
    """Copy a source tree into a destination, hard-linking if possible.

    :param str source_tree: Source directory to be copied.
    :param str destination_tree: Destination directory. If this directory
                                 already exists, the files in `source_tree`
                                 will take precedence.
    :param callable copy_function: Callable used to copy each file; defaults
                                   to :func:`link_or_copy`.
    """
    if not os.path.isdir(source_tree):
        raise NotADirectoryError('{!r} is not a directory'.format(source_tree))

    if (not os.path.isdir(destination_tree) and
            os.path.exists(destination_tree)):
        raise NotADirectoryError(
            'Cannot overwrite non-directory {!r} with directory '
            '{!r}'.format(destination_tree, source_tree))

    create_similar_directory(source_tree, destination_tree)

    for root, directories, files in os.walk(source_tree):
        for directory in directories:
            source = os.path.join(root, directory)
            destination = os.path.join(
                destination_tree, os.path.relpath(source, source_tree))

            create_similar_directory(source, destination)

        for file_name in files:
            source = os.path.join(root, file_name)
            destination = os.path.join(
                destination_tree, os.path.relpath(source, source_tree))

            copy_function(source, destination)


def create_similar_directory(source, destination, follow_symlinks=False):
    """Create a directory with the same permission bits and owner information.

    :param str source: Directory from which to copy name, permission bits, and
                       owner information.
    :param str destination: Directory to create and to which the `source`
                            information will be copied.
    :param bool follow_symlinks: Whether or not symlinks should be followed.
    """
    stat = os.stat(source, follow_symlinks=follow_symlinks)
    uid = stat.st_uid
    gid = stat.st_gid
    os.makedirs(destination, exist_ok=True)
    try:
        os.chown(destination, uid, gid, follow_symlinks=follow_symlinks)
    except PermissionError as exception:
        logger.debug('Unable to chown {}: {}'.format(destination, exception))
    shutil.copystat(source, destination, follow_symlinks=follow_symlinks)


def _search_and_replace_contents(file_path, search_pattern, replacement):
    # Don't bother trying to rewrite a symlink. It's either invalid or the
    # linked file will be rewritten on its own.
    if os.path.islink(file_path):
        return

    try:
        with open(file_path, 'r+') as f:
            try:
                original = f.read()
            except UnicodeDecodeError:
                # This was probably a binary file. Skip it.
                return

            replaced = search_pattern.sub(replacement, original)
            if replaced != original:
                f.seek(0)
                f.truncate()
                f.write(replaced)
    except PermissionError as e:
        logger.warning('Unable to open {path} for writing: {error}'.format(
            path=file_path, error=e))


def executable_exists(path):
    """Return True if 'path' exists and is readable and executable."""
    return os.path.exists(path) and os.access(path, os.R_OK | os.X_OK)


@contextmanager
def requires_command_success(command, not_found_fmt=None, failure_fmt=None):
    if isinstance(command, str):
        cmd_list = command.split()
    else:
        raise TypeError('command must be a string.')
    kwargs = dict(command=command, cmd_list=cmd_list)
    try:
        subprocess.check_call(
            cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        if not_found_fmt is not None:
            kwargs['fmt'] = not_found_fmt
        raise RequiredCommandNotFound(**kwargs)
    except subprocess.CalledProcessError:
        if failure_fmt is not None:
            kwargs['fmt'] = failure_fmt
        raise RequiredCommandFailure(**kwargs)
    yield


@contextmanager
def requires_path_exists(path, error_fmt=None):
    if not os.path.exists(path):
        kwargs = dict(path=path)
        if error_fmt is not None:
            kwargs['fmt'] = error_fmt
        raise RequiredPathDoesNotExist(**kwargs)
    yield
gpl-3.0
5,294,693,437,406,654,000
37.171569
79
0.653525
false
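requires_command_success and requires_path_exists above are fail-fast guards: they raise one of the snapcraft error types imported at the top of the module before any work starts, and otherwise simply yield. A usage sketch follows, assuming snapcraft is importable as packaged above; the command, the file name and the backup path are invented.

from snapcraft.file_utils import (
    link_or_copy,
    requires_command_success,
    requires_path_exists,
)

# Raises RequiredCommandNotFound / RequiredCommandFailure if git is missing or broken.
with requires_command_success('git --version'):
    pass  # safe to shell out to git past this point

# Raises RequiredPathDoesNotExist if the project file is absent; otherwise the
# body runs and link_or_copy hard-links the file, falling back to a plain copy.
with requires_path_exists('snapcraft.yaml'):
    link_or_copy('snapcraft.yaml', '/tmp/snapcraft.yaml.bak')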
dpgaspar/Flask-AppBuilder
examples/related_fields/testdata.py
1
2739
import logging from app import db from app.models import ContactGroup, ContactSubGroup, Gender, Contact import random from datetime import datetime log = logging.getLogger(__name__) def get_random_name(names_list, size=1): name_lst = [ names_list[random.randrange(0, len(names_list))].decode("utf-8").capitalize() for i in range(0, size) ] return " ".join(name_lst) try: db.session.query(Contact).delete() db.session.query(Gender).delete() db.session.query(ContactGroup).delete() db.session.commit() except: db.session.rollback() try: groups = list() groups.append(ContactGroup(name="Friends")) groups.append(ContactGroup(name="Work")) db.session.add(groups[0]) db.session.add(groups[1]) db.session.commit() sub_groups = list() sub_groups.append(ContactSubGroup(name="Close Friends", contact_group=groups[0])) sub_groups.append(ContactSubGroup(name="Long time no see", contact_group=groups[0])) sub_groups.append(ContactSubGroup(name="BBIC", contact_group=groups[1])) sub_groups.append(ContactSubGroup(name="Miniclip", contact_group=groups[1])) db.session.add(sub_groups[0]) db.session.add(sub_groups[1]) db.session.add(sub_groups[2]) db.session.add(sub_groups[3]) db.session.commit() except Exception as e: log.error("Creating Groups: %s", e) db.session.rollback() try: genders = list() genders.append(Gender(name="Male")) genders.append(Gender(name="Female")) db.session.add(genders[0]) db.session.add(genders[1]) db.session.commit() except Exception as e: log.error("Creating Genders: %s", e) db.session.rollback() f = open("NAMES.DIC", "rb") names_list = [x.strip() for x in f.readlines()] f.close() for i in range(1, 1000): c = Contact() c.name = get_random_name(names_list, random.randrange(2, 6)) c.address = "Street " + names_list[random.randrange(0, len(names_list))].decode( "utf-8" ) c.personal_phone = random.randrange(1111111, 9999999) c.personal_celphone = random.randrange(1111111, 9999999) group = random.randrange(0, 2) if group == 0: sub_group = random.randrange(0, 2) else: sub_group = random.randrange(1, 4) c.contact_group = groups[group] c.contact_sub_group = sub_groups[sub_group] c.gender = genders[random.randrange(0, 2)] year = random.choice(range(1900, 2012)) month = random.choice(range(1, 12)) day = random.choice(range(1, 28)) c.birthday = datetime(year, month, day) db.session.add(c) try: db.session.commit() print("inserted", c) except Exception as e: log.error("Creating Contact: %s", e) db.session.rollback()
bsd-3-clause
-7,856,697,193,785,780,000
29.098901
88
0.655714
false
blckshrk/Weboob
modules/allocine/backend.py
1
4176
# -*- coding: utf-8 -*- # Copyright(C) 2013 Julien Veyssier # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from weboob.capabilities.cinema import ICapCinema, Person, Movie from weboob.tools.backend import BaseBackend from .browser import AllocineBrowser from urllib import quote_plus __all__ = ['AllocineBackend'] class AllocineBackend(BaseBackend, ICapCinema): NAME = 'allocine' MAINTAINER = u'Julien Veyssier' EMAIL = '[email protected]' VERSION = '0.h' DESCRIPTION = u'AlloCiné French cinema database service' LICENSE = 'AGPLv3+' BROWSER = AllocineBrowser def get_movie(self, id): return self.browser.get_movie(id) def get_person(self, id): return self.browser.get_person(id) def iter_movies(self, pattern): return self.browser.iter_movies(quote_plus(pattern.encode('utf-8'))) def iter_persons(self, pattern): return self.browser.iter_persons(quote_plus(pattern.encode('utf-8'))) def iter_movie_persons(self, id, role=None): return self.browser.iter_movie_persons(id, role) def iter_person_movies(self, id, role=None): return self.browser.iter_person_movies(id, role) def iter_person_movies_ids(self, id): return self.browser.iter_person_movies_ids(id) def iter_movie_persons_ids(self, id): return self.browser.iter_movie_persons_ids(id) def get_person_biography(self, id): return self.browser.get_person_biography(id) def get_movie_releases(self, id, country=None): return self.browser.get_movie_releases(id, country) def fill_person(self, person, fields): if 'real_name' in fields or 'birth_place' in fields\ or 'death_date' in fields or 'nationality' in fields\ or 'short_biography' in fields or 'roles' in fields\ or 'birth_date' in fields or 'thumbnail_url' in fields\ or 'biography' in fields\ or 'gender' in fields or fields is None: per = self.get_person(person.id) person.real_name = per.real_name person.birth_date = per.birth_date person.death_date = per.death_date person.birth_place = per.birth_place person.gender = per.gender person.nationality = per.nationality person.short_biography = per.short_biography person.short_description = per.short_description person.roles = per.roles person.biography = per.biography person.thumbnail_url = per.thumbnail_url return person def fill_movie(self, movie, fields): if 'other_titles' in fields or 'release_date' in fields\ or 'duration' in fields or 'country' in fields\ or 'roles' in fields or 'note' in fields\ or 'thumbnail_url' in fields: mov = self.get_movie(movie.id) movie.other_titles = mov.other_titles movie.release_date = mov.release_date movie.duration = mov.duration movie.pitch = mov.pitch movie.country = mov.country movie.note = mov.note movie.roles = mov.roles movie.genres = mov.genres movie.short_description = mov.short_description movie.thumbnail_url = mov.thumbnail_url if 'all_release_dates' in fields: movie.all_release_dates = self.get_movie_releases(movie.id) return movie OBJECTS = { Person: fill_person, Movie: fill_movie }
agpl-3.0
-4,641,905,836,682,227,000
34.991379
77
0.649581
false
boedy1996/SPARC
geonode/contrib/dynamic/models.py
23
13932
from __future__ import unicode_literals import keyword import re from django.utils.datastructures import SortedDict from django.conf import settings from django.contrib.gis.utils import LayerMapping from django.contrib.gis.db import models from django.contrib.gis import admin from django.core.exceptions import ValidationError from django import db from geonode.layers.models import Layer from .postgis import file2pgtable DYNAMIC_DATASTORE = 'datastore' class ModelDescription(models.Model): name = models.CharField(max_length=255) layer = models.ForeignKey(Layer, null=True, blank=True) def get_django_model(self, with_admin=False): "Returns a functional Django model based on current data" # Get all associated fields into a list ready for dict() fields = [(f.name, f.get_django_field()) for f in self.fields.all()] # Use the create_model function defined above return create_model(self.name, dict(fields), app_label='dynamic', module='geonode.contrib.dynamic', options={'db_table': self.name, 'managed': False }, with_admin=with_admin, ) def data_objects(self): """ """ TheModel = self.get_django_model() return TheModel.using(DYNAMIC_DATASTORE) def is_valid_field(self, field_data, all_data): if hasattr( models, field_data) and issubclass( getattr( models, field_data), models.Field): # It exists and is a proper field type return raise ValidationError("This is not a valid field type.") class Field(models.Model): model = models.ForeignKey(ModelDescription, related_name='fields') name = models.CharField(max_length=255) type = models.CharField(max_length=255, validators=[is_valid_field]) original_name = models.CharField(max_length=255) def get_django_field(self): "Returns the correct field type, instantiated with applicable settings" # Get all associated settings into a list ready for dict() settings = [(s.name, s.value) for s in self.settings.all()] # noqa field_type = getattr(models, self.type) # Instantiate the field with the settings as **kwargs return field_type(**dict(settings)) class Meta: unique_together = (('model', 'name'),) class Setting(models.Model): field = models.ForeignKey(Field, related_name='settings') name = models.CharField(max_length=255) value = models.CharField(max_length=255) class Meta: unique_together = (('field', 'name'),) def create_model( name, fields=None, app_label='', module='', options=None, admin_opts=None, with_admin=False): """ Create specified model """ class Meta: # Using type('Meta', ...) gives a dictproxy error during model creation pass if app_label: # app_label must be set using the Meta inner class setattr(Meta, 'app_label', app_label) # Update Meta with any options that were provided if options is not None: for key, value in options.iteritems(): setattr(Meta, key, value) # Set up a dictionary to simulate declarations within a class attrs = {'__module__': module, 'Meta': Meta} # Add in any fields that were provided if fields: attrs.update(fields) # Create the class, which automatically triggers ModelBase processing model = type(str(name), (models.Model,), attrs) class Admin(admin.OSMGeoAdmin): """Takes into account multi-db queries. """ using = DYNAMIC_DATASTORE def save_model(self, request, obj, form, change): # Tell Django to save objects to the 'other' database. obj.save(using=self.using) def delete_model(self, request, obj): # Tell Django to delete objects from the 'other' database obj.delete(using=self.using) def get_queryset(self, request): # Tell Django to look for objects on the 'other' database. 
return super(Admin, self).get_queryset(request).using(self.using) def queryset(self, request): # Tell Django to look for objects on the 'other' database. return super(Admin, self).queryset(request).using(self.using) def formfield_for_foreignkey(self, db_field, request=None, **kwargs): # Tell Django to populate ForeignKey widgets using a query # on the 'other' database. return super( Admin, self).formfield_for_foreignkey( db_field, request=request, using=self.using, **kwargs) def formfield_for_manytomany(self, db_field, request=None, **kwargs): # Tell Django to populate ManyToMany widgets using a query # on the 'other' database. return super( Admin, self).formfield_for_manytomany( db_field, request=request, using=self.using, **kwargs) # Create an Admin class if admin options were provided if admin_opts is not None: for key, value in admin_opts: setattr(Admin, key, value) if not with_admin: return model else: return model, Admin def generate_model(model_description, mapping, db_key=''): """Uses instrospection to generate a Django model from a database table. """ connection = db.connections[db_key] cursor = connection.cursor() table_name = model_description.name try: relations = connection.introspection.get_relations(cursor, table_name) except NotImplementedError: relations = {} try: indexes = connection.introspection.get_indexes(cursor, table_name) except NotImplementedError: indexes = {} used_column_names = [] # Holds column names used in the table so far for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)): # Holds Field notes, to be displayed in a Python comment. comment_notes = [] # Holds Field parameters such as 'db_column'. extra_params = SortedDict() column_name = row[0] is_relation = i in relations att_name, params, notes = normalize_col_name( column_name, used_column_names, is_relation ) extra_params.update(params) comment_notes.extend(notes) used_column_names.append(att_name) # Add primary_key and unique, if necessary. if column_name in indexes: if indexes[column_name]['primary_key']: extra_params['primary_key'] = True elif indexes[column_name]['unique']: extra_params['unique'] = True # Calling `get_field_type` to get the field type string and any # additional parameters and notes field_type, field_params, field_notes = get_field_type( connection, table_name, row) extra_params.update(field_params) comment_notes.extend(field_notes) GEOM_FIELDS = { 'GEOMETRYCOLLECTION': 'GeometryCollectionField', 'POINT': 'PointField', 'MULTIPOINT': 'MultiPointField', 'LINESTRING': 'LineStringField', 'MULTILINESTRING': 'MultiLineStringField', 'POLYGON': 'PolygonField', 'MULTIPOLYGON': 'MultiPolygonField', 'GEOMETRY': 'GeometryField', } geom_type = mapping['geom'] # Use the geom_type to override the geometry field. if field_type == 'GeometryField': if geom_type in GEOM_FIELDS: field_type = GEOM_FIELDS[geom_type] # Change the type of id to AutoField to get auto generated ids. if att_name == 'id' and extra_params == {'primary_key': True}: field_type = 'AutoField' # Add 'null' and 'blank', if the 'null_ok' flag was present in the # table description. if row[6]: # If it's NULL... 
if field_type == 'BooleanField': field_type = 'NullBooleanField' else: extra_params['blank'] = True if field_type not in ('TextField', 'CharField'): extra_params['null'] = True if any(field_type) and column_name != 'id': field, __ = Field.objects.get_or_create( model=model_description, name=att_name) field.type = field_type field.original_name = mapping[column_name] field.save() for name, value in extra_params.items(): if any(name): Setting.objects.get_or_create( field=field, name=name, value=value) def normalize_col_name(col_name, used_column_names, is_relation): """ Modify the column name to make it Python-compatible as a field name """ field_params = {} field_notes = [] new_name = col_name.lower() if new_name != col_name: field_notes.append('Field name made lowercase.') if is_relation: if new_name.endswith('_id'): new_name = new_name[:-3] else: field_params['db_column'] = col_name new_name, num_repl = re.subn(r'\W', '_', new_name) if num_repl > 0: field_notes.append('Field renamed to remove unsuitable characters.') if new_name.find('__') >= 0: while new_name.find('__') >= 0: new_name = new_name.replace('__', '_') if col_name.lower().find('__') >= 0: # Only add the comment if the double underscore was in the original # name field_notes.append( "Field renamed because it contained more than one '_' in a row.") if new_name.startswith('_'): new_name = 'field%s' % new_name field_notes.append("Field renamed because it started with '_'.") if new_name.endswith('_'): new_name = '%sfield' % new_name field_notes.append("Field renamed because it ended with '_'.") if keyword.iskeyword(new_name): new_name += '_field' field_notes.append( 'Field renamed because it was a Python reserved word.') if new_name[0].isdigit(): new_name = 'number_%s' % new_name field_notes.append( "Field renamed because it wasn't a valid Python identifier.") if new_name in used_column_names: num = 0 while '%s_%d' % (new_name, num) in used_column_names: num += 1 new_name = '%s_%d' % (new_name, num) field_notes.append('Field renamed because of name conflict.') if col_name != new_name and field_notes: field_params['db_column'] = col_name return new_name, field_params, field_notes def get_field_type(connection, table_name, row): """ Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field. """ field_params = SortedDict() field_notes = [] try: field_type = connection.introspection.get_field_type(row[1], row) except KeyError: field_type = 'TextField' field_notes.append('This field type is a guess.') # This is a hook for DATA_TYPES_REVERSE to return a tuple of # (field_type, field_params_dict). if isinstance(field_type, tuple): field_type, new_params = field_type field_params.update(new_params) # Add max_length for all CharFields. if field_type == 'CharField' and row[3]: field_params['max_length'] = int(row[3]) if field_type == 'DecimalField': field_params['max_digits'] = row[4] field_params['decimal_places'] = row[5] return field_type, field_params, field_notes def pre_save_layer(instance, sender, **kwargs): """Save to postgis if there is a datastore. """ # Abort if a postgis DATABASE is not configured. if DYNAMIC_DATASTORE not in settings.DATABASES: return # Do not process if there is no table. 
base_file = instance.get_base_file() if base_file is None or base_file.name != 'shp': return filename = base_file.file.path # Load the table in postgis and get a mapping from fields in the database # and fields in the Shapefile. mapping = file2pgtable(filename, instance.name) # Get a dynamic model with the same name as the layer. model_description, __ = ModelDescription.objects.get_or_create( name=instance.name) # Set up the fields with the postgis table generate_model(model_description, mapping, db_key=DYNAMIC_DATASTORE) # Get the new actual Django model. TheModel = model_description.get_django_model() # Use layermapping to load the layer with geodjango lm = LayerMapping(TheModel, filename, mapping, encoding=instance.charset, using=DYNAMIC_DATASTORE, transform=None ) lm.save() def post_save_layer(instance, sender, **kwargs): """Assign layer instance to the dynamic model. """ # Assign this layer model to all ModelDescriptions with the same name. ModelDescription.objects.filter(name=instance.name).update(layer=instance) models.signals.pre_save.connect(pre_save_layer, sender=Layer) models.signals.post_save.connect(post_save_layer, sender=Layer)
gpl-3.0
6,557,411,052,492,091,000
32.652174
96
0.599699
false
cuckoosandbox/monitor
src/capstone/bindings/python/test_xcore.py
3
2325
#!/usr/bin/env python # Capstone Python bindings, by Nguyen Anh Quynh <[email protected]> from __future__ import print_function from capstone import * from capstone.xcore import * from xprint import to_x, to_hex, to_x_32 XCORE_CODE = b"\xfe\x0f\xfe\x17\x13\x17\xc6\xfe\xec\x17\x97\xf8\xec\x4f\x1f\xfd\xec\x37\x07\xf2\x45\x5b\xf9\xfa\x02\x06\x1b\x10\x09\xfd\xec\xa7" all_tests = ( (CS_ARCH_XCORE, 0, XCORE_CODE, "XCore"), ) def print_insn_detail(insn): # print address, mnemonic and operands print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str)) # "data" instruction generated by SKIPDATA option has no detail if insn.id == 0: return if len(insn.operands) > 0: print("\top_count: %u" % len(insn.operands)) c = 0 for i in insn.operands: if i.type == XCORE_OP_REG: print("\t\toperands[%u].type: REG = %s" % (c, insn.reg_name(i.reg))) if i.type == XCORE_OP_IMM: print("\t\toperands[%u].type: IMM = 0x%s" % (c, to_x(i.imm))) if i.type == XCORE_OP_MEM: print("\t\toperands[%u].type: MEM" % c) if i.mem.base != 0: print("\t\t\toperands[%u].mem.base: REG = %s" \ % (c, insn.reg_name(i.mem.base))) if i.mem.index != 0: print("\t\t\toperands[%u].mem.index: REG = %s" \ % (c, insn.reg_name(i.mem.index))) if i.mem.disp != 0: print("\t\t\toperands[%u].mem.disp: 0x%s" \ % (c, to_x(i.mem.disp))) if i.mem.direct != 1: print("\t\t\toperands[%u].mem.direct: -1" % c) c += 1 # ## Test class Cs def test_class(): for (arch, mode, code, comment) in all_tests: print("*" * 16) print("Platform: %s" %comment) print("Code: %s" % to_hex(code)) print("Disasm:") try: md = Cs(arch, mode) md.detail = True for insn in md.disasm(code, 0x1000): print_insn_detail(insn) print () print("0x%x:\n" % (insn.address + insn.size)) except CsError as e: print("ERROR: %s" %e) if __name__ == '__main__': test_class()
gpl-3.0
8,256,170,380,025,095,000
31.746479
144
0.501075
false
jjmleiro/hue
apps/hbase/src/hbase/urls.py
32
1029
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.conf.urls import patterns, url urlpatterns = patterns('hbase.views', url(r'^$', 'app', name='index'), url(r'api/(?P<url>.+)$', 'api_router'), url(r'^install_examples$', 'install_examples', name='install_examples'), )
apache-2.0
8,220,170,429,493,725,000
40.16
74
0.739553
false
hickford/cython
Cython/Compiler/FlowControl.py
14
45482
from __future__ import absolute_import import cython cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object, Builtin=object, InternalError=object, error=object, warning=object, py_object_type=object, unspecified_type=object, object_expr=object, fake_rhs_expr=object, TypedExprNode=object) from . import Builtin from . import ExprNodes from . import Nodes from . import Options from .PyrexTypes import py_object_type, unspecified_type from . import PyrexTypes from .Visitor import TreeVisitor, CythonTransform from .Errors import error, warning, InternalError from .Optimize import ConstantFolding class TypedExprNode(ExprNodes.ExprNode): # Used for declaring assignments of a specified type without a known entry. def __init__(self, type, may_be_none=None, pos=None): super(TypedExprNode, self).__init__(pos) self.type = type self._may_be_none = may_be_none def may_be_none(self): return self._may_be_none != False object_expr = TypedExprNode(py_object_type, may_be_none=True) # Fake rhs to silence "unused variable" warning fake_rhs_expr = TypedExprNode(unspecified_type) class ControlBlock(object): """Control flow graph node. Sequence of assignments and name references. children set of children nodes parents set of parent nodes positions set of position markers stats list of block statements gen dict of assignments generated by this block bounded set of entries that are definitely bounded in this block Example: a = 1 b = a + c # 'c' is already bounded or exception here stats = [Assignment(a), NameReference(a), NameReference(c), Assignment(b)] gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)} bounded = set([Entry(a), Entry(c)]) """ def __init__(self): self.children = set() self.parents = set() self.positions = set() self.stats = [] self.gen = {} self.bounded = set() self.i_input = 0 self.i_output = 0 self.i_gen = 0 self.i_kill = 0 self.i_state = 0 def empty(self): return (not self.stats and not self.positions) def detach(self): """Detach block from parents and children.""" for child in self.children: child.parents.remove(self) for parent in self.parents: parent.children.remove(self) self.parents.clear() self.children.clear() def add_child(self, block): self.children.add(block) block.parents.add(self) class ExitBlock(ControlBlock): """Non-empty exit point block.""" def empty(self): return False class AssignmentList(object): def __init__(self): self.stats = [] class ControlFlow(object): """Control-flow graph. entry_point ControlBlock entry point for this graph exit_point ControlBlock normal exit point block ControlBlock current block blocks set children nodes entries set tracked entries loops list stack for loop descriptors exceptions list stack for exception descriptors """ def __init__(self): self.blocks = set() self.entries = set() self.loops = [] self.exceptions = [] self.entry_point = ControlBlock() self.exit_point = ExitBlock() self.blocks.add(self.exit_point) self.block = self.entry_point def newblock(self, parent=None): """Create floating block linked to `parent` if given. NOTE: Block is NOT added to self.blocks """ block = ControlBlock() self.blocks.add(block) if parent: parent.add_child(block) return block def nextblock(self, parent=None): """Create block children block linked to current or `parent` if given. 
NOTE: Block is added to self.blocks """ block = ControlBlock() self.blocks.add(block) if parent: parent.add_child(block) elif self.block: self.block.add_child(block) self.block = block return self.block def is_tracked(self, entry): if entry.is_anonymous: return False return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or entry.from_closure or entry.in_closure or entry.error_on_uninitialized) def is_statically_assigned(self, entry): if (entry.is_local and entry.is_variable and (entry.type.is_struct_or_union or entry.type.is_complex or entry.type.is_array or entry.type.is_cpp_class)): # stack allocated structured variable => never uninitialised return True return False def mark_position(self, node): """Mark position, will be used to draw graph nodes.""" if self.block: self.block.positions.add(node.pos[:2]) def mark_assignment(self, lhs, rhs, entry): if self.block and self.is_tracked(entry): assignment = NameAssignment(lhs, rhs, entry) self.block.stats.append(assignment) self.block.gen[entry] = assignment self.entries.add(entry) def mark_argument(self, lhs, rhs, entry): if self.block and self.is_tracked(entry): assignment = Argument(lhs, rhs, entry) self.block.stats.append(assignment) self.block.gen[entry] = assignment self.entries.add(entry) def mark_deletion(self, node, entry): if self.block and self.is_tracked(entry): assignment = NameDeletion(node, entry) self.block.stats.append(assignment) self.block.gen[entry] = Uninitialized self.entries.add(entry) def mark_reference(self, node, entry): if self.block and self.is_tracked(entry): self.block.stats.append(NameReference(node, entry)) ## XXX: We don't track expression evaluation order so we can't use ## XXX: successful reference as initialization sign. ## # Local variable is definitely bound after this reference ## if not node.allow_null: ## self.block.bounded.add(entry) self.entries.add(entry) def normalize(self): """Delete unreachable and orphan blocks.""" queue = set([self.entry_point]) visited = set() while queue: root = queue.pop() visited.add(root) for child in root.children: if child not in visited: queue.add(child) unreachable = self.blocks - visited for block in unreachable: block.detach() visited.remove(self.entry_point) for block in visited: if block.empty(): for parent in block.parents: # Re-parent for child in block.children: parent.add_child(child) block.detach() unreachable.add(block) self.blocks -= unreachable def initialize(self): """Set initial state, map assignments to bits.""" self.assmts = {} bit = 1 for entry in self.entries: assmts = AssignmentList() assmts.mask = assmts.bit = bit self.assmts[entry] = assmts bit <<= 1 for block in self.blocks: for stat in block.stats: if isinstance(stat, NameAssignment): stat.bit = bit assmts = self.assmts[stat.entry] assmts.stats.append(stat) assmts.mask |= bit bit <<= 1 for block in self.blocks: for entry, stat in block.gen.items(): assmts = self.assmts[entry] if stat is Uninitialized: block.i_gen |= assmts.bit else: block.i_gen |= stat.bit block.i_kill |= assmts.mask block.i_output = block.i_gen for entry in block.bounded: block.i_kill |= self.assmts[entry].bit for assmts in self.assmts.values(): self.entry_point.i_gen |= assmts.bit self.entry_point.i_output = self.entry_point.i_gen def map_one(self, istate, entry): ret = set() assmts = self.assmts[entry] if istate & assmts.bit: if self.is_statically_assigned(entry): ret.add(StaticAssignment(entry)) elif entry.from_closure: ret.add(Unknown) else: ret.add(Uninitialized) for assmt in assmts.stats: if istate & assmt.bit: 
ret.add(assmt) return ret def reaching_definitions(self): """Per-block reaching definitions analysis.""" dirty = True while dirty: dirty = False for block in self.blocks: i_input = 0 for parent in block.parents: i_input |= parent.i_output i_output = (i_input & ~block.i_kill) | block.i_gen if i_output != block.i_output: dirty = True block.i_input = i_input block.i_output = i_output class LoopDescr(object): def __init__(self, next_block, loop_block): self.next_block = next_block self.loop_block = loop_block self.exceptions = [] class ExceptionDescr(object): """Exception handling helper. entry_point ControlBlock Exception handling entry point finally_enter ControlBlock Normal finally clause entry point finally_exit ControlBlock Normal finally clause exit point """ def __init__(self, entry_point, finally_enter=None, finally_exit=None): self.entry_point = entry_point self.finally_enter = finally_enter self.finally_exit = finally_exit class NameAssignment(object): def __init__(self, lhs, rhs, entry): if lhs.cf_state is None: lhs.cf_state = set() self.lhs = lhs self.rhs = rhs self.entry = entry self.pos = lhs.pos self.refs = set() self.is_arg = False self.is_deletion = False self.inferred_type = None def __repr__(self): return '%s(entry=%r)' % (self.__class__.__name__, self.entry) def infer_type(self): self.inferred_type = self.rhs.infer_type(self.entry.scope) return self.inferred_type def type_dependencies(self): return self.rhs.type_dependencies(self.entry.scope) @property def type(self): if not self.entry.type.is_unspecified: return self.entry.type return self.inferred_type class StaticAssignment(NameAssignment): """Initialised at declaration time, e.g. stack allocation.""" def __init__(self, entry): if not entry.type.is_pyobject: may_be_none = False else: may_be_none = None # unknown lhs = TypedExprNode( entry.type, may_be_none=may_be_none, pos=entry.pos) super(StaticAssignment, self).__init__(lhs, lhs, entry) def infer_type(self): return self.entry.type def type_dependencies(self): return () class Argument(NameAssignment): def __init__(self, lhs, rhs, entry): NameAssignment.__init__(self, lhs, rhs, entry) self.is_arg = True class NameDeletion(NameAssignment): def __init__(self, lhs, entry): NameAssignment.__init__(self, lhs, lhs, entry) self.is_deletion = True def infer_type(self): inferred_type = self.rhs.infer_type(self.entry.scope) if (not inferred_type.is_pyobject and inferred_type.can_coerce_to_pyobject(self.entry.scope)): return py_object_type self.inferred_type = inferred_type return inferred_type class Uninitialized(object): """Definitely not initialised yet.""" class Unknown(object): """Coming from outer closure, might be initialised or not.""" class NameReference(object): def __init__(self, node, entry): if node.cf_state is None: node.cf_state = set() self.node = node self.entry = entry self.pos = node.pos def __repr__(self): return '%s(entry=%r)' % (self.__class__.__name__, self.entry) class ControlFlowState(list): # Keeps track of Node's entry assignments # # cf_is_null [boolean] It is uninitialized # cf_maybe_null [boolean] May be uninitialized # is_single [boolean] Has only one assignment at this point cf_maybe_null = False cf_is_null = False is_single = False def __init__(self, state): if Uninitialized in state: state.discard(Uninitialized) self.cf_maybe_null = True if not state: self.cf_is_null = True elif Unknown in state: state.discard(Unknown) self.cf_maybe_null = True else: if len(state) == 1: self.is_single = True # XXX: Remove fake_rhs_expr super(ControlFlowState, 
self).__init__( [i for i in state if i.rhs is not fake_rhs_expr]) def one(self): return self[0] class GVContext(object): """Graphviz subgraph object.""" def __init__(self): self.blockids = {} self.nextid = 0 self.children = [] self.sources = {} def add(self, child): self.children.append(child) def nodeid(self, block): if block not in self.blockids: self.blockids[block] = 'block%d' % self.nextid self.nextid += 1 return self.blockids[block] def extract_sources(self, block): if not block.positions: return '' start = min(block.positions) stop = max(block.positions) srcdescr = start[0] if not srcdescr in self.sources: self.sources[srcdescr] = list(srcdescr.get_lines()) lines = self.sources[srcdescr] return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]]) def render(self, fp, name, annotate_defs=False): """Render graphviz dot graph""" fp.write('digraph %s {\n' % name) fp.write(' node [shape=box];\n') for child in self.children: child.render(fp, self, annotate_defs) fp.write('}\n') def escape(self, text): return text.replace('"', '\\"').replace('\n', '\\n') class GV(object): """Graphviz DOT renderer.""" def __init__(self, name, flow): self.name = name self.flow = flow def render(self, fp, ctx, annotate_defs=False): fp.write(' subgraph %s {\n' % self.name) for block in self.flow.blocks: label = ctx.extract_sources(block) if annotate_defs: for stat in block.stats: if isinstance(stat, NameAssignment): label += '\n %s [%s %s]' % ( stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1]) elif isinstance(stat, NameReference): if stat.entry: label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1]) if not label: label = 'empty' pid = ctx.nodeid(block) fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label))) for block in self.flow.blocks: pid = ctx.nodeid(block) for child in block.children: fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child))) fp.write(' }\n') class MessageCollection(object): """Collect error/warnings messages first then sort""" def __init__(self): self.messages = set() def error(self, pos, message): self.messages.add((pos, True, message)) def warning(self, pos, message): self.messages.add((pos, False, message)) def report(self): for pos, is_error, message in sorted(self.messages): if is_error: error(pos, message) else: warning(pos, message, 2) def check_definitions(flow, compiler_directives): flow.initialize() flow.reaching_definitions() # Track down state assignments = set() # Node to entry map references = {} assmt_nodes = set() for block in flow.blocks: i_state = block.i_input for stat in block.stats: i_assmts = flow.assmts[stat.entry] state = flow.map_one(i_state, stat.entry) if isinstance(stat, NameAssignment): stat.lhs.cf_state.update(state) assmt_nodes.add(stat.lhs) i_state = i_state & ~i_assmts.mask if stat.is_deletion: i_state |= i_assmts.bit else: i_state |= stat.bit assignments.add(stat) if stat.rhs is not fake_rhs_expr: stat.entry.cf_assignments.append(stat) elif isinstance(stat, NameReference): references[stat.node] = stat.entry stat.entry.cf_references.append(stat) stat.node.cf_state.update(state) ## if not stat.node.allow_null: ## i_state &= ~i_assmts.bit ## # after successful read, the state is known to be initialised state.discard(Uninitialized) state.discard(Unknown) for assmt in state: assmt.refs.add(stat) # Check variable usage warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized'] warn_unused_result = compiler_directives['warn.unused_result'] warn_unused = compiler_directives['warn.unused'] 
warn_unused_arg = compiler_directives['warn.unused_arg'] messages = MessageCollection() # assignment hints for node in assmt_nodes: if Uninitialized in node.cf_state: node.cf_maybe_null = True if len(node.cf_state) == 1: node.cf_is_null = True else: node.cf_is_null = False elif Unknown in node.cf_state: node.cf_maybe_null = True else: node.cf_is_null = False node.cf_maybe_null = False # Find uninitialized references and cf-hints for node, entry in references.items(): if Uninitialized in node.cf_state: node.cf_maybe_null = True if not entry.from_closure and len(node.cf_state) == 1: node.cf_is_null = True if (node.allow_null or entry.from_closure or entry.is_pyclass_attr or entry.type.is_error): pass # Can be uninitialized here elif node.cf_is_null: if entry.error_on_uninitialized or ( Options.error_on_uninitialized and ( entry.type.is_pyobject or entry.type.is_unspecified)): messages.error( node.pos, "local variable '%s' referenced before assignment" % entry.name) else: messages.warning( node.pos, "local variable '%s' referenced before assignment" % entry.name) elif warn_maybe_uninitialized: messages.warning( node.pos, "local variable '%s' might be referenced before assignment" % entry.name) elif Unknown in node.cf_state: # TODO: better cross-closure analysis to know when inner functions # are being called before a variable is being set, and when # a variable is known to be set before even defining the # inner function, etc. node.cf_maybe_null = True else: node.cf_is_null = False node.cf_maybe_null = False # Unused result for assmt in assignments: if (not assmt.refs and not assmt.entry.is_pyclass_attr and not assmt.entry.in_closure): if assmt.entry.cf_references and warn_unused_result: if assmt.is_arg: messages.warning(assmt.pos, "Unused argument value '%s'" % assmt.entry.name) else: messages.warning(assmt.pos, "Unused result in '%s'" % assmt.entry.name) assmt.lhs.cf_used = False # Unused entries for entry in flow.entries: if (not entry.cf_references and not entry.is_pyclass_attr): if entry.name != '_' and not entry.name.startswith('unused'): # '_' is often used for unused variables, e.g. 
in loops if entry.is_arg: if warn_unused_arg: messages.warning(entry.pos, "Unused argument '%s'" % entry.name) else: if warn_unused: messages.warning(entry.pos, "Unused entry '%s'" % entry.name) entry.cf_used = False messages.report() for node in assmt_nodes: node.cf_state = ControlFlowState(node.cf_state) for node in references: node.cf_state = ControlFlowState(node.cf_state) class AssignmentCollector(TreeVisitor): def __init__(self): super(AssignmentCollector, self).__init__() self.assignments = [] def visit_Node(self): self._visitchildren(self, None) def visit_SingleAssignmentNode(self, node): self.assignments.append((node.lhs, node.rhs)) def visit_CascadedAssignmentNode(self, node): for lhs in node.lhs_list: self.assignments.append((lhs, node.rhs)) class ControlFlowAnalysis(CythonTransform): def visit_ModuleNode(self, node): self.gv_ctx = GVContext() self.constant_folder = ConstantFolding() # Set of NameNode reductions self.reductions = set() self.in_inplace_assignment = False self.env_stack = [] self.env = node.scope self.stack = [] self.flow = ControlFlow() self.visitchildren(node) check_definitions(self.flow, self.current_directives) dot_output = self.current_directives['control_flow.dot_output'] if dot_output: annotate_defs = self.current_directives['control_flow.dot_annotate_defs'] fp = open(dot_output, 'wt') try: self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs) finally: fp.close() return node def visit_FuncDefNode(self, node): for arg in node.args: if arg.default: self.visitchildren(arg) self.visitchildren(node, ('decorators',)) self.env_stack.append(self.env) self.env = node.local_scope self.stack.append(self.flow) self.flow = ControlFlow() # Collect all entries for entry in node.local_scope.entries.values(): if self.flow.is_tracked(entry): self.flow.entries.add(entry) self.mark_position(node) # Function body block self.flow.nextblock() for arg in node.args: self._visit(arg) if node.star_arg: self.flow.mark_argument(node.star_arg, TypedExprNode(Builtin.tuple_type, may_be_none=False), node.star_arg.entry) if node.starstar_arg: self.flow.mark_argument(node.starstar_arg, TypedExprNode(Builtin.dict_type, may_be_none=False), node.starstar_arg.entry) self._visit(node.body) # Workaround for generators if node.is_generator: self._visit(node.gbody.body) # Exit point if self.flow.block: self.flow.block.add_child(self.flow.exit_point) # Cleanup graph self.flow.normalize() check_definitions(self.flow, self.current_directives) self.flow.blocks.add(self.flow.entry_point) self.gv_ctx.add(GV(node.local_scope.name, self.flow)) self.flow = self.stack.pop() self.env = self.env_stack.pop() return node def visit_DefNode(self, node): node.used = True return self.visit_FuncDefNode(node) def visit_GeneratorBodyDefNode(self, node): return node def visit_CTypeDefNode(self, node): return node def mark_assignment(self, lhs, rhs=None): if not self.flow.block: return if self.flow.exceptions: exc_descr = self.flow.exceptions[-1] self.flow.block.add_child(exc_descr.entry_point) self.flow.nextblock() if not rhs: rhs = object_expr if lhs.is_name: if lhs.entry is not None: entry = lhs.entry else: entry = self.env.lookup(lhs.name) if entry is None: # TODO: This shouldn't happen... 
return self.flow.mark_assignment(lhs, rhs, entry) elif lhs.is_sequence_constructor: for i, arg in enumerate(lhs.args): if not rhs or arg.is_starred: item_node = None else: item_node = rhs.inferable_item_node(i) self.mark_assignment(arg, item_node) else: self._visit(lhs) if self.flow.exceptions: exc_descr = self.flow.exceptions[-1] self.flow.block.add_child(exc_descr.entry_point) self.flow.nextblock() def mark_position(self, node): """Mark position if DOT output is enabled.""" if self.current_directives['control_flow.dot_output']: self.flow.mark_position(node) def visit_FromImportStatNode(self, node): for name, target in node.items: if name != "*": self.mark_assignment(target) self.visitchildren(node) return node def visit_AssignmentNode(self, node): raise InternalError("Unhandled assignment node") def visit_SingleAssignmentNode(self, node): self._visit(node.rhs) self.mark_assignment(node.lhs, node.rhs) return node def visit_CascadedAssignmentNode(self, node): self._visit(node.rhs) for lhs in node.lhs_list: self.mark_assignment(lhs, node.rhs) return node def visit_ParallelAssignmentNode(self, node): collector = AssignmentCollector() collector.visitchildren(node) for lhs, rhs in collector.assignments: self._visit(rhs) for lhs, rhs in collector.assignments: self.mark_assignment(lhs, rhs) return node def visit_InPlaceAssignmentNode(self, node): self.in_inplace_assignment = True self.visitchildren(node) self.in_inplace_assignment = False self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node())) return node def visit_DelStatNode(self, node): for arg in node.args: if arg.is_name: entry = arg.entry or self.env.lookup(arg.name) if entry.in_closure or entry.from_closure: error(arg.pos, "can not delete variable '%s' " "referenced in nested scope" % entry.name) if not node.ignore_nonexisting: self._visit(arg) # mark reference self.flow.mark_deletion(arg, entry) else: self._visit(arg) return node def visit_CArgDeclNode(self, node): entry = self.env.lookup(node.name) if entry: may_be_none = not node.not_none self.flow.mark_argument( node, TypedExprNode(entry.type, may_be_none), entry) return node def visit_NameNode(self, node): if self.flow.block: entry = node.entry or self.env.lookup(node.name) if entry: self.flow.mark_reference(node, entry) if entry in self.reductions and not self.in_inplace_assignment: error(node.pos, "Cannot read reduction variable in loop body") return node def visit_StatListNode(self, node): if self.flow.block: for stat in node.stats: self._visit(stat) if not self.flow.block: stat.is_terminator = True break return node def visit_Node(self, node): self.visitchildren(node) self.mark_position(node) return node def visit_IfStatNode(self, node): next_block = self.flow.newblock() parent = self.flow.block # If clauses for clause in node.if_clauses: parent = self.flow.nextblock(parent) self._visit(clause.condition) self.flow.nextblock() self._visit(clause.body) if self.flow.block: self.flow.block.add_child(next_block) # Else clause if node.else_clause: self.flow.nextblock(parent=parent) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: parent.add_child(next_block) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def visit_WhileStatNode(self, node): condition_block = self.flow.nextblock() next_block = self.flow.newblock() # Condition block self.flow.loops.append(LoopDescr(next_block, condition_block)) if node.condition: self._visit(node.condition) # Body block self.flow.nextblock() 
self._visit(node.body) self.flow.loops.pop() # Loop it if self.flow.block: self.flow.block.add_child(condition_block) self.flow.block.add_child(next_block) # Else clause if node.else_clause: self.flow.nextblock(parent=condition_block) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: condition_block.add_child(next_block) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def mark_forloop_target(self, node): # TODO: Remove redundancy with range optimization... is_special = False sequence = node.iterator.sequence target = node.target if isinstance(sequence, ExprNodes.SimpleCallNode): function = sequence.function if sequence.self is None and function.is_name: entry = self.env.lookup(function.name) if not entry or entry.is_builtin: if function.name == 'reversed' and len(sequence.args) == 1: sequence = sequence.args[0] elif function.name == 'enumerate' and len(sequence.args) == 1: if target.is_sequence_constructor and len(target.args) == 2: iterator = sequence.args[0] if iterator.is_name: iterator_type = iterator.infer_type(self.env) if iterator_type.is_builtin_type: # assume that builtin types have a length within Py_ssize_t self.mark_assignment( target.args[0], ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX', type=PyrexTypes.c_py_ssize_t_type)) target = target.args[1] sequence = sequence.args[0] if isinstance(sequence, ExprNodes.SimpleCallNode): function = sequence.function if sequence.self is None and function.is_name: entry = self.env.lookup(function.name) if not entry or entry.is_builtin: if function.name in ('range', 'xrange'): is_special = True for arg in sequence.args[:2]: self.mark_assignment(target, arg) if len(sequence.args) > 2: self.mark_assignment(target, self.constant_folder( ExprNodes.binop_node(node.pos, '+', sequence.args[0], sequence.args[2]))) if not is_special: # A for-loop basically translates to subsequent calls to # __getitem__(), so using an IndexNode here allows us to # naturally infer the base type of pointers, C arrays, # Python strings, etc., while correctly falling back to an # object type when the base type cannot be handled. 
self.mark_assignment(target, node.item) def visit_AsyncForStatNode(self, node): return self.visit_ForInStatNode(node) def visit_ForInStatNode(self, node): condition_block = self.flow.nextblock() next_block = self.flow.newblock() # Condition with iterator self.flow.loops.append(LoopDescr(next_block, condition_block)) self._visit(node.iterator) # Target assignment self.flow.nextblock() if isinstance(node, Nodes.ForInStatNode): self.mark_forloop_target(node) elif isinstance(node, Nodes.AsyncForStatNode): # not entirely correct, but good enough for now self.mark_assignment(node.target, node.item) else: # Parallel self.mark_assignment(node.target) # Body block if isinstance(node, Nodes.ParallelRangeNode): # In case of an invalid self._delete_privates(node, exclude=node.target.entry) self.flow.nextblock() self._visit(node.body) self.flow.loops.pop() # Loop it if self.flow.block: self.flow.block.add_child(condition_block) # Else clause if node.else_clause: self.flow.nextblock(parent=condition_block) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: condition_block.add_child(next_block) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def _delete_privates(self, node, exclude=None): for private_node in node.assigned_nodes: if not exclude or private_node.entry is not exclude: self.flow.mark_deletion(private_node, private_node.entry) def visit_ParallelRangeNode(self, node): reductions = self.reductions # if node.target is None or not a NameNode, an error will have # been previously issued if hasattr(node.target, 'entry'): self.reductions = set(reductions) for private_node in node.assigned_nodes: private_node.entry.error_on_uninitialized = True pos, reduction = node.assignments[private_node.entry] if reduction: self.reductions.add(private_node.entry) node = self.visit_ForInStatNode(node) self.reductions = reductions return node def visit_ParallelWithBlockNode(self, node): for private_node in node.assigned_nodes: private_node.entry.error_on_uninitialized = True self._delete_privates(node) self.visitchildren(node) self._delete_privates(node) return node def visit_ForFromStatNode(self, node): condition_block = self.flow.nextblock() next_block = self.flow.newblock() # Condition with iterator self.flow.loops.append(LoopDescr(next_block, condition_block)) self._visit(node.bound1) self._visit(node.bound2) if node.step is not None: self._visit(node.step) # Target assignment self.flow.nextblock() self.mark_assignment(node.target, node.bound1) if node.step is not None: self.mark_assignment(node.target, self.constant_folder( ExprNodes.binop_node(node.pos, '+', node.bound1, node.step))) # Body block self.flow.nextblock() self._visit(node.body) self.flow.loops.pop() # Loop it if self.flow.block: self.flow.block.add_child(condition_block) # Else clause if node.else_clause: self.flow.nextblock(parent=condition_block) self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) else: condition_block.add_child(next_block) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def visit_LoopNode(self, node): raise InternalError("Generic loops are not supported") def visit_WithTargetAssignmentStatNode(self, node): self.mark_assignment(node.lhs, node.with_node.enter_call) return node def visit_WithStatNode(self, node): self._visit(node.manager) self._visit(node.enter_call) self._visit(node.body) return node def visit_TryExceptStatNode(self, node): # After exception 
handling next_block = self.flow.newblock() # Body block self.flow.newblock() # Exception entry point entry_point = self.flow.newblock() self.flow.exceptions.append(ExceptionDescr(entry_point)) self.flow.nextblock() ## XXX: links to exception handling point should be added by ## XXX: children nodes self.flow.block.add_child(entry_point) self.flow.nextblock() self._visit(node.body) self.flow.exceptions.pop() # After exception if self.flow.block: if node.else_clause: self.flow.nextblock() self._visit(node.else_clause) if self.flow.block: self.flow.block.add_child(next_block) for clause in node.except_clauses: self.flow.block = entry_point if clause.pattern: for pattern in clause.pattern: self._visit(pattern) else: # TODO: handle * pattern pass entry_point = self.flow.newblock(parent=self.flow.block) self.flow.nextblock() if clause.target: self.mark_assignment(clause.target) self._visit(clause.body) if self.flow.block: self.flow.block.add_child(next_block) if self.flow.exceptions: entry_point.add_child(self.flow.exceptions[-1].entry_point) if next_block.parents: self.flow.block = next_block else: self.flow.block = None return node def visit_TryFinallyStatNode(self, node): body_block = self.flow.nextblock() # Exception entry point entry_point = self.flow.newblock() self.flow.block = entry_point self._visit(node.finally_except_clause) if self.flow.block and self.flow.exceptions: self.flow.block.add_child(self.flow.exceptions[-1].entry_point) # Normal execution finally_enter = self.flow.newblock() self.flow.block = finally_enter self._visit(node.finally_clause) finally_exit = self.flow.block descr = ExceptionDescr(entry_point, finally_enter, finally_exit) self.flow.exceptions.append(descr) if self.flow.loops: self.flow.loops[-1].exceptions.append(descr) self.flow.block = body_block ## XXX: Is it still required body_block.add_child(entry_point) self.flow.nextblock() self._visit(node.body) self.flow.exceptions.pop() if self.flow.loops: self.flow.loops[-1].exceptions.pop() if self.flow.block: self.flow.block.add_child(finally_enter) if finally_exit: self.flow.block = self.flow.nextblock(parent=finally_exit) else: self.flow.block = None return node def visit_RaiseStatNode(self, node): self.mark_position(node) self.visitchildren(node) if self.flow.exceptions: self.flow.block.add_child(self.flow.exceptions[-1].entry_point) self.flow.block = None return node def visit_ReraiseStatNode(self, node): self.mark_position(node) if self.flow.exceptions: self.flow.block.add_child(self.flow.exceptions[-1].entry_point) self.flow.block = None return node def visit_ReturnStatNode(self, node): self.mark_position(node) self.visitchildren(node) for exception in self.flow.exceptions[::-1]: if exception.finally_enter: self.flow.block.add_child(exception.finally_enter) if exception.finally_exit: exception.finally_exit.add_child(self.flow.exit_point) break else: if self.flow.block: self.flow.block.add_child(self.flow.exit_point) self.flow.block = None return node def visit_BreakStatNode(self, node): if not self.flow.loops: #error(node.pos, "break statement not inside loop") return node loop = self.flow.loops[-1] self.mark_position(node) for exception in loop.exceptions[::-1]: if exception.finally_enter: self.flow.block.add_child(exception.finally_enter) if exception.finally_exit: exception.finally_exit.add_child(loop.next_block) break else: self.flow.block.add_child(loop.next_block) self.flow.block = None return node def visit_ContinueStatNode(self, node): if not self.flow.loops: #error(node.pos, "continue statement not 
inside loop") return node loop = self.flow.loops[-1] self.mark_position(node) for exception in loop.exceptions[::-1]: if exception.finally_enter: self.flow.block.add_child(exception.finally_enter) if exception.finally_exit: exception.finally_exit.add_child(loop.loop_block) break else: self.flow.block.add_child(loop.loop_block) self.flow.block = None return node def visit_ComprehensionNode(self, node): if node.expr_scope: self.env_stack.append(self.env) self.env = node.expr_scope # Skip append node here self._visit(node.loop) if node.expr_scope: self.env = self.env_stack.pop() return node def visit_ScopedExprNode(self, node): if node.expr_scope: self.env_stack.append(self.env) self.env = node.expr_scope self.visitchildren(node) if node.expr_scope: self.env = self.env_stack.pop() return node def visit_PyClassDefNode(self, node): self.visitchildren(node, ('dict', 'metaclass', 'mkw', 'bases', 'class_result')) self.flow.mark_assignment(node.target, node.classobj, self.env.lookup(node.name)) self.env_stack.append(self.env) self.env = node.scope self.flow.nextblock() self.visitchildren(node, ('body',)) self.flow.nextblock() self.env = self.env_stack.pop() return node def visit_AmpersandNode(self, node): if node.operand.is_name: # Fake assignment to silence warning self.mark_assignment(node.operand, fake_rhs_expr) self.visitchildren(node) return node
apache-2.0
128,987,482,302,917,060
33.639756
107
0.559122
false
yytang2012/novels-crawler
novelsCrawler/spiders/m-yushuwu.py
1
2338
# -*- coding: utf-8 -*- from scrapy import Selector from libs.misc import get_spider_name_from_domain from libs.polish import * from novelsCrawler.spiders.simpleSpider import SimpleSpider class MyushuwuSpider(SimpleSpider): """ classdocs example: https://m.yushuwu.com/novel/31960.html """ dom = 'm.yushuwu.com' name = get_spider_name_from_domain(dom) allowed_domains = [dom] def parse_title(self, response): sel = Selector(response) title = sel.xpath('//title/text()').extract()[0] title = title.split('_')[0] title = polish_title(title, self.name) print(title) return title def parse_subtitle_contents(self, response): sel = Selector(response) subtitle = sel.xpath('//h1/text()').extract()[0] subtitle = polish_subtitle(subtitle) contents = sel.xpath('//div[@id="nr1"]/p/text()').extract() contents = polish_content(contents) return subtitle, contents def url_check(self, url): # pattern = 'http://m.lwxs.com/wapbook/([\d]+).html' # m = re.search(pattern, url) # if m is not None: # return 'http://m.lwxs.com/wapbook/{0}_1/'.format(m.group(1)) return url def get_next_page_url(self, response): sel = Selector(response) tmp = sel.xpath('//div[@class="nr_page"]/table/tr') next_page_url = tmp.xpath('td[@class="next"]/a/@href').extract()[0] mulu = tmp.xpath('td[@class="mulu"]/a/@href').extract()[0] next_page_url = None if next_page_url == mulu else response.urljoin(next_page_url) return next_page_url def get_pages_url(self, response): sel = Selector(response) pages_info_sel = sel.xpath('//div[@class="lb_mulu chapterList"]/ul/li/a') last_page_id = int(pages_info_sel[-1].xpath('text()').extract()[0]) pages_url = [None for i in range(last_page_id)] for page_info_sel in pages_info_sel: try: page_id = int(page_info_sel.xpath('text()').extract()[0]) subtitle_url = page_info_sel.xpath('@href').extract()[0] subtitle_url = response.urljoin(subtitle_url) pages_url[page_id-1] = subtitle_url except Exception as e: pass return pages_url
mit
542,870,649,504,191,600
34.424242
90
0.587254
false
vuolter/pyload
src/pyload/plugins/downloaders/XdadevelopersCom.py
1
1413
# -*- coding: utf-8 -*- # # Test links: # http://forum.xda-developers.com/devdb/project/dl/?id=10885 from ..base.simple_downloader import SimpleDownloader class XdadevelopersCom(SimpleDownloader): __name__ = "XdadevelopersCom" __type__ = "downloader" __version__ = "0.08" __status__ = "testing" __pattern__ = ( r"https?://(?:www\.)?forum\.xda-developers\.com/devdb/project/dl/\?id=\d+" ) __config__ = [ ("enabled", "bool", "Activated", True), ("use_premium", "bool", "Use premium account if available", True), ("fallback", "bool", "Fallback to free download if premium fails", True), ("chk_filesize", "bool", "Check file size", True), ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10), ] __description__ = """Xda-developers.com downloader plugin""" __license__ = "GPLv3" __authors__ = [("zapp-brannigan", "[email protected]")] NAME_PATTERN = r"<label>Filename:</label>\s*<div>\s*(?P<N>.*?)\n" SIZE_PATTERN = r"<label>Size:</label>\s*<div>\s*(?P<S>[\d.,]+)(?P<U>[\w^_]+)" OFFLINE_PATTERN = r"</i> Device Filter</h3>" def setup(self): self.multi_dl = True self.resume_download = True self.chunk_limit = 1 def handle_free(self, pyfile): # TODO: Revert to `get={'task': "get"}` in 0.6.x self.link = pyfile.url + "&task=get"
agpl-3.0
-224,362,435,025,899,740
31.860465
85
0.571833
false
posita/dropbox-sdk-python
dropbox/stone_validators.py
2
24413
""" Defines classes to represent each Stone type in Python. These classes should be used to validate Python objects and normalize them for a given type. The data types defined here should not be specific to an RPC or serialization format. This module should be dropped into a project that requires the use of Stone. In the future, this could be imported from a pre-installed Python package, rather than being added to a project. """ from __future__ import absolute_import, unicode_literals from abc import ABCMeta, abstractmethod import datetime import hashlib import math import numbers import re import six _MYPY = False if _MYPY: import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression # See <http://python3porting.com/differences.html#buffer> if six.PY3: _binary_types = (bytes, memoryview) # noqa: E501,F821 # pylint: disable=undefined-variable,useless-suppression else: _binary_types = (bytes, buffer) # noqa: E501,F821 # pylint: disable=undefined-variable,useless-suppression class ValidationError(Exception): """Raised when a value doesn't pass validation by its validator.""" def __init__(self, message, parent=None): """ Args: message (str): Error message detailing validation failure. parent (str): Adds the parent as the closest reference point for the error. Use :meth:`add_parent` to add more. """ super(ValidationError, self).__init__(message) self.message = message self._parents = [] if parent: self._parents.append(parent) def add_parent(self, parent): """ Args: parent (str): Adds the parent to the top of the tree of references that lead to the validator that failed. """ self._parents.append(parent) def __str__(self): """ Returns: str: A descriptive message of the validation error that may also include the path to the validator that failed. """ if self._parents: return '{}: {}'.format('.'.join(self._parents[::-1]), self.message) else: return self.message def __repr__(self): # Not a perfect repr, but includes the error location information. return 'ValidationError(%r)' % six.text_type(self) def generic_type_name(v): """Return a descriptive type name that isn't Python specific. For example, an int value will return 'integer' rather than 'int'.""" if isinstance(v, numbers.Integral): # Must come before real numbers check since integrals are reals too return 'integer' elif isinstance(v, numbers.Real): return 'float' elif isinstance(v, (tuple, list)): return 'list' elif isinstance(v, six.string_types): return 'string' elif v is None: return 'null' else: return type(v).__name__ class Validator(object): """All primitive and composite data types should be a subclass of this.""" __metaclass__ = ABCMeta @abstractmethod def validate(self, val): """Validates that val is of this data type. Returns: A normalized value if validation succeeds. Raises: ValidationError """ pass def has_default(self): return False def get_default(self): raise AssertionError('No default available.') class Primitive(Validator): """A basic type that is defined by Stone.""" # pylint: disable=abstract-method pass class Boolean(Primitive): def validate(self, val): if not isinstance(val, bool): raise ValidationError('%r is not a valid boolean' % val) return val class Integer(Primitive): """ Do not use this class directly. Extend it and specify a 'minimum' and 'maximum' value as class variables for a more restrictive integer range. 
""" minimum = None # type: typing.Optional[int] maximum = None # type: typing.Optional[int] def __init__(self, min_value=None, max_value=None): """ A more restrictive minimum or maximum value can be specified than the range inherent to the defined type. """ if min_value is not None: assert isinstance(min_value, numbers.Integral), \ 'min_value must be an integral number' assert min_value >= self.minimum, \ 'min_value cannot be less than the minimum value for this ' \ 'type (%d < %d)' % (min_value, self.minimum) self.minimum = min_value if max_value is not None: assert isinstance(max_value, numbers.Integral), \ 'max_value must be an integral number' assert max_value <= self.maximum, \ 'max_value cannot be greater than the maximum value for ' \ 'this type (%d < %d)' % (max_value, self.maximum) self.maximum = max_value def validate(self, val): if not isinstance(val, numbers.Integral): raise ValidationError('expected integer, got %s' % generic_type_name(val)) elif not (self.minimum <= val <= self.maximum): raise ValidationError('%d is not within range [%d, %d]' % (val, self.minimum, self.maximum)) return val def __repr__(self): return '%s()' % self.__class__.__name__ class Int32(Integer): minimum = -2**31 maximum = 2**31 - 1 class UInt32(Integer): minimum = 0 maximum = 2**32 - 1 class Int64(Integer): minimum = -2**63 maximum = 2**63 - 1 class UInt64(Integer): minimum = 0 maximum = 2**64 - 1 class Real(Primitive): """ Do not use this class directly. Extend it and optionally set a 'minimum' and 'maximum' value to enforce a range that's a subset of the Python float implementation. Python floats are doubles. """ minimum = None # type: typing.Optional[float] maximum = None # type: typing.Optional[float] def __init__(self, min_value=None, max_value=None): """ A more restrictive minimum or maximum value can be specified than the range inherent to the defined type. """ if min_value is not None: assert isinstance(min_value, numbers.Real), \ 'min_value must be a real number' if not isinstance(min_value, float): try: min_value = float(min_value) except OverflowError: raise AssertionError('min_value is too small for a float') if self.minimum is not None and min_value < self.minimum: raise AssertionError('min_value cannot be less than the ' 'minimum value for this type (%f < %f)' % (min_value, self.minimum)) self.minimum = min_value if max_value is not None: assert isinstance(max_value, numbers.Real), \ 'max_value must be a real number' if not isinstance(max_value, float): try: max_value = float(max_value) except OverflowError: raise AssertionError('max_value is too large for a float') if self.maximum is not None and max_value > self.maximum: raise AssertionError('max_value cannot be greater than the ' 'maximum value for this type (%f < %f)' % (max_value, self.maximum)) self.maximum = max_value def validate(self, val): if not isinstance(val, numbers.Real): raise ValidationError('expected real number, got %s' % generic_type_name(val)) if not isinstance(val, float): # This checks for the case where a number is passed in with a # magnitude larger than supported by float64. 
try: val = float(val) except OverflowError: raise ValidationError('too large for float') if math.isnan(val) or math.isinf(val): raise ValidationError('%f values are not supported' % val) if self.minimum is not None and val < self.minimum: raise ValidationError('%f is not greater than %f' % (val, self.minimum)) if self.maximum is not None and val > self.maximum: raise ValidationError('%f is not less than %f' % (val, self.maximum)) return val def __repr__(self): return '%s()' % self.__class__.__name__ class Float32(Real): # Maximum and minimums from the IEEE 754-1985 standard minimum = -3.40282 * 10**38 maximum = 3.40282 * 10**38 class Float64(Real): pass class String(Primitive): """Represents a unicode string.""" def __init__(self, min_length=None, max_length=None, pattern=None): if min_length is not None: assert isinstance(min_length, numbers.Integral), \ 'min_length must be an integral number' assert min_length >= 0, 'min_length must be >= 0' if max_length is not None: assert isinstance(max_length, numbers.Integral), \ 'max_length must be an integral number' assert max_length > 0, 'max_length must be > 0' if min_length and max_length: assert max_length >= min_length, 'max_length must be >= min_length' if pattern is not None: assert isinstance(pattern, six.string_types), \ 'pattern must be a string' self.min_length = min_length self.max_length = max_length self.pattern = pattern self.pattern_re = None if pattern: try: self.pattern_re = re.compile(r"\A(?:" + pattern + r")\Z") except re.error as e: raise AssertionError('Regex {!r} failed: {}'.format( pattern, e.args[0])) def validate(self, val): """ A unicode string of the correct length and pattern will pass validation. In PY2, we enforce that a str type must be valid utf-8, and a unicode string will be returned. 
""" if not isinstance(val, six.string_types): raise ValidationError("'%s' expected to be a string, got %s" % (val, generic_type_name(val))) if not six.PY3 and isinstance(val, str): try: val = val.decode('utf-8') except UnicodeDecodeError: raise ValidationError("'%s' was not valid utf-8") if self.max_length is not None and len(val) > self.max_length: raise ValidationError("'%s' must be at most %d characters, got %d" % (val, self.max_length, len(val))) if self.min_length is not None and len(val) < self.min_length: raise ValidationError("'%s' must be at least %d characters, got %d" % (val, self.min_length, len(val))) if self.pattern and not self.pattern_re.match(val): raise ValidationError("'%s' did not match pattern '%s'" % (val, self.pattern)) return val class Bytes(Primitive): def __init__(self, min_length=None, max_length=None): if min_length is not None: assert isinstance(min_length, numbers.Integral), \ 'min_length must be an integral number' assert min_length >= 0, 'min_length must be >= 0' if max_length is not None: assert isinstance(max_length, numbers.Integral), \ 'max_length must be an integral number' assert max_length > 0, 'max_length must be > 0' if min_length is not None and max_length is not None: assert max_length >= min_length, 'max_length must be >= min_length' self.min_length = min_length self.max_length = max_length def validate(self, val): if not isinstance(val, _binary_types): raise ValidationError("expected bytes type, got %s" % generic_type_name(val)) elif self.max_length is not None and len(val) > self.max_length: raise ValidationError("'%s' must have at most %d bytes, got %d" % (val, self.max_length, len(val))) elif self.min_length is not None and len(val) < self.min_length: raise ValidationError("'%s' has fewer than %d bytes, got %d" % (val, self.min_length, len(val))) return val class Timestamp(Primitive): """Note that while a format is specified, it isn't used in validation since a native Python datetime object is preferred. 
The format, however, can and should be used by serializers.""" def __init__(self, fmt): """fmt must be composed of format codes that the C standard (1989) supports, most notably in its strftime() function.""" assert isinstance(fmt, six.text_type), 'format must be a string' self.format = fmt def validate(self, val): if not isinstance(val, datetime.datetime): raise ValidationError('expected timestamp, got %s' % generic_type_name(val)) elif val.tzinfo is not None and \ val.tzinfo.utcoffset(val).total_seconds() != 0: raise ValidationError('timestamp should have either a UTC ' 'timezone or none set at all') return val class Composite(Validator): """Validator for a type that builds on other primitive and composite types.""" # pylint: disable=abstract-method pass class List(Composite): """Assumes list contents are homogeneous with respect to types.""" def __init__(self, item_validator, min_items=None, max_items=None): """Every list item will be validated with item_validator.""" self.item_validator = item_validator if min_items is not None: assert isinstance(min_items, numbers.Integral), \ 'min_items must be an integral number' assert min_items >= 0, 'min_items must be >= 0' if max_items is not None: assert isinstance(max_items, numbers.Integral), \ 'max_items must be an integral number' assert max_items > 0, 'max_items must be > 0' if min_items is not None and max_items is not None: assert max_items >= min_items, 'max_items must be >= min_items' self.min_items = min_items self.max_items = max_items def validate(self, val): if not isinstance(val, (tuple, list)): raise ValidationError('%r is not a valid list' % val) elif self.max_items is not None and len(val) > self.max_items: raise ValidationError('%r has more than %s items' % (val, self.max_items)) elif self.min_items is not None and len(val) < self.min_items: raise ValidationError('%r has fewer than %s items' % (val, self.min_items)) return [self.item_validator.validate(item) for item in val] class Map(Composite): """Assumes map keys and values are homogeneous with respect to types.""" def __init__(self, key_validator, value_validator): """ Every Map key/value pair will be validated with item_validator. key validators must be a subclass of a String validator """ self.key_validator = key_validator self.value_validator = value_validator def validate(self, val): if not isinstance(val, dict): raise ValidationError('%r is not a valid dict' % val) return { self.key_validator.validate(key): self.value_validator.validate(value) for key, value in val.items() } class Struct(Composite): def __init__(self, definition): """ Args: definition (class): A generated class representing a Stone struct from a spec. Must have a _fields_ attribute with the following structure: _fields_ = [(field_name, validator), ...] where field_name: Name of the field (str). validator: Validator object. """ super(Struct, self).__init__() self.definition = definition def validate(self, val): """ For a val to pass validation, val must be of the correct type and have all required fields present. """ self.validate_type_only(val) self.validate_fields_only(val) return val def validate_with_permissions(self, val, caller_permissions): """ For a val to pass validation, val must be of the correct type and have all required permissioned fields present. Should only be called for callers with extra permissions. 
""" self.validate(val) self.validate_fields_only_with_permissions(val, caller_permissions) return val def validate_fields_only(self, val): """ To pass field validation, no required field should be missing. This method assumes that the contents of each field have already been validated on assignment, so it's merely a presence check. FIXME(kelkabany): Since the definition object does not maintain a list of which fields are required, all fields are scanned. """ for field_name in self.definition._all_field_names_: if not hasattr(val, field_name): raise ValidationError("missing required field '%s'" % field_name) def validate_fields_only_with_permissions(self, val, caller_permissions): """ To pass field validation, no required field should be missing. This method assumes that the contents of each field have already been validated on assignment, so it's merely a presence check. Should only be called for callers with extra permissions. """ self.validate_fields_only(val) # check if type has been patched for extra_permission in caller_permissions.permissions: all_field_names = '_all_{}_field_names_'.format(extra_permission) for field_name in getattr(self.definition, all_field_names, set()): if not hasattr(val, field_name): raise ValidationError("missing required field '%s'" % field_name) def validate_type_only(self, val): """ Use this when you only want to validate that the type of an object is correct, but not yet validate each field. """ # Since the definition maintains the list of fields for serialization, # we're okay with a subclass that might have extra information. This # makes it easier to return one subclass for two routes, one of which # relies on the parent class. if not isinstance(val, self.definition): raise ValidationError('expected type %s, got %s' % (self.definition.__name__, generic_type_name(val))) def has_default(self): return not self.definition._has_required_fields def get_default(self): assert not self.definition._has_required_fields, 'No default available.' return self.definition() class StructTree(Struct): """Validator for structs with enumerated subtypes. NOTE: validate_fields_only() validates the fields known to this base struct, but does not do any validation specific to the subtype. """ # See PyCQA/pylint#1043 for why this is disabled; this should show up # as a usless-suppression (and can be removed) once a fix is released def __init__(self, definition): # pylint: disable=useless-super-delegation super(StructTree, self).__init__(definition) class Union(Composite): def __init__(self, definition): """ Args: definition (class): A generated class representing a Stone union from a spec. Must have a _tagmap attribute with the following structure: _tagmap = {field_name: validator, ...} where field_name (str): Tag name. validator (Validator): Tag value validator. """ self.definition = definition def validate(self, val): """ For a val to pass validation, it must have a _tag set. This assumes that the object validated that _tag is a valid tag, and that any associated value has also been validated. """ self.validate_type_only(val) if not hasattr(val, '_tag') or val._tag is None: raise ValidationError('no tag set') return val def validate_type_only(self, val): """ Use this when you only want to validate that the type of an object is correct, but not yet validate each field. We check whether val is a Python parent class of the definition. This is because Union subtyping works in the opposite direction of Python inheritance. 
For example, if a union U2 extends U1 in Python, this validator will
        accept U1 in places where U2 is expected.
        """
        if not issubclass(self.definition, type(val)):
            raise ValidationError('expected type %s or subtype, got %s' %
                                  (self.definition.__name__, generic_type_name(val)))


class Void(Primitive):

    def validate(self, val):
        if val is not None:
            raise ValidationError('expected NoneType, got %s' %
                                  generic_type_name(val))

    def has_default(self):
        return True

    def get_default(self):
        return None


class Nullable(Validator):

    def __init__(self, validator):
        assert isinstance(validator, (Primitive, Composite)), \
            'validator must be for a primitive or composite type'
        assert not isinstance(validator, Nullable), \
            'nullables cannot be stacked'
        assert not isinstance(validator, Void), \
            'void cannot be made nullable'
        self.validator = validator

    def validate(self, val):
        if val is None:
            return
        else:
            return self.validator.validate(val)

    def validate_type_only(self, val):
        """Use this only if Nullable is wrapping a Composite."""
        if val is None:
            return
        else:
            return self.validator.validate_type_only(val)

    def has_default(self):
        return True

    def get_default(self):
        return None


class Redactor(object):
    def __init__(self, regex):
        """
        Args:
            regex: What parts of the field to redact.
        """
        self.regex = regex

    @abstractmethod
    def apply(self, val):
        """Redacts information from annotated field.

        Returns:
            A redacted version of the string provided.
        """
        pass

    def _get_matches(self, val):
        if not self.regex:
            return None
        try:
            return re.search(self.regex, val)
        except TypeError:
            return None


class HashRedactor(Redactor):
    def apply(self, val):
        matches = self._get_matches(val)

        val_to_hash = str(val) if isinstance(val, int) or isinstance(val, float) else val

        try:
            # add string literal to ensure unicode
            hashed = hashlib.md5(val_to_hash.encode('utf-8')).hexdigest() + ''
        except (AttributeError, ValueError):
            hashed = None

        if matches:
            blotted = '***'.join(matches.groups())
            if hashed:
                return '{} ({})'.format(hashed, blotted)
            return blotted
        return hashed


class BlotRedactor(Redactor):
    def apply(self, val):
        matches = self._get_matches(val)
        if matches:
            return '***'.join(matches.groups())
        return '********'
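

# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates how the validators defined above compose. It assumes the
# ValidationError class and the module-level imports of the original file
# (six, datetime, re, hashlib, numbers) are in scope as usual.
if __name__ == '__main__':
    tags = List(Bytes(min_length=1), max_items=3)
    assert tags.validate([b'a', b'bc']) == [b'a', b'bc']

    maybe_tags = Nullable(tags)
    assert maybe_tags.validate(None) is None

    try:
        tags.validate([b'a', b'b', b'c', b'd'])  # one item over max_items
    except ValidationError as exc:
        print('rejected as expected: %s' % exc)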
mit
-2,067,160,673,419,353,900
35.437313
115
0.588334
false
theicfire/djangofun
djangotoolbox/tests.py
6
14370
from .fields import ListField, SetField, DictField, EmbeddedModelField from django.db import models, connections from django.db.models import Q from django.db.models.signals import post_save from django.db.utils import DatabaseError from django.dispatch.dispatcher import receiver from django.test import TestCase from django.utils import unittest def count_calls(func): def wrapper(*args, **kwargs): wrapper.calls += 1 return func(*args, **kwargs) wrapper.calls = 0 return wrapper class Target(models.Model): index = models.IntegerField() class Source(models.Model): target = models.ForeignKey(Target) index = models.IntegerField() class ListModel(models.Model): integer = models.IntegerField(primary_key=True) floating_point = models.FloatField() names = ListField(models.CharField(max_length=500)) names_with_default = ListField(models.CharField(max_length=500), default=[]) names_nullable = ListField(models.CharField(max_length=500), null=True) class OrderedListModel(models.Model): ordered_ints = ListField(models.IntegerField(max_length=500), default=[], ordering=count_calls(lambda x:x), null=True) ordered_nullable = ListField(ordering=lambda x:x, null=True) class SetModel(models.Model): setfield = SetField(models.IntegerField()) supports_dicts = getattr(connections['default'].features, 'supports_dicts', False) if supports_dicts: class DictModel(models.Model): dictfield = DictField(models.IntegerField()) dictfield_nullable = DictField(null=True) auto_now = DictField(models.DateTimeField(auto_now=True)) class EmbeddedModelFieldModel(models.Model): simple = EmbeddedModelField('EmbeddedModel', null=True) simple_untyped = EmbeddedModelField(null=True) typed_list = ListField(EmbeddedModelField('SetModel')) untyped_list = ListField(EmbeddedModelField()) untyped_dict = DictField(EmbeddedModelField()) ordered_list = ListField(EmbeddedModelField(), ordering=lambda obj: obj.index) class EmbeddedModel(models.Model): some_relation = models.ForeignKey(DictModel, null=True) someint = models.IntegerField() auto_now = models.DateTimeField(auto_now=True) auto_now_add = models.DateTimeField(auto_now_add=True) class FilterTest(TestCase): floats = [5.3, 2.6, 9.1, 1.58] names = [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',] unordered_ints = [4, 2, 6, 1] def setUp(self): for i, float in enumerate(FilterTest.floats): ListModel(integer=i+1, floating_point=float, names=FilterTest.names[:i+1]).save() def test_startswith(self): self.assertEquals(dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__startswith='Sa')]), dict([(3, ['Kakashi', 'Naruto', 'Sasuke',]), (4, ['Kakashi', 'Naruto', 'Sasuke', 'Sakura',]), ])) def test_options(self): self.assertEqual([entity.names_with_default for entity in ListModel.objects.filter(names__startswith='Sa')], [[], []]) self.assertEqual([entity.names_nullable for entity in ListModel.objects.filter(names__startswith='Sa')], [None, None]) def test_default_value(self): # Make sure default value is copied ListModel().names_with_default.append(2) self.assertEqual(ListModel().names_with_default, []) def test_ordering(self): f = OrderedListModel._meta.fields[1] f.ordering.calls = 0 # ensure no ordering happens on assignment obj = OrderedListModel() obj.ordered_ints = self.unordered_ints self.assertEqual(f.ordering.calls, 0) obj.save() self.assertEqual(OrderedListModel.objects.get().ordered_ints, sorted(self.unordered_ints)) # ordering should happen only once, i.e. 
the order function may be # called N times at most (N being the number of items in the list) self.assertLessEqual(f.ordering.calls, len(self.unordered_ints)) def test_gt(self): # test gt on list self.assertEquals(dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__gt='Kakashi')]), dict([(2, [u'Kakashi', u'Naruto',]), (3, [u'Kakashi', u'Naruto', u'Sasuke',]), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',]), ])) def test_lt(self): # test lt on list self.assertEquals(dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__lt='Naruto')]), dict([(1, [u'Kakashi',]), (2, [u'Kakashi', u'Naruto',]), (3, [u'Kakashi', u'Naruto', u'Sasuke',]), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',]), ])) def test_gte(self): # test gte on list self.assertEquals(dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__gte='Sakura')]), dict([(3, [u'Kakashi', u'Naruto', u'Sasuke',]), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',]), ])) def test_lte(self): # test lte on list self.assertEquals(dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__lte='Kakashi')]), dict([(1, [u'Kakashi',]), (2, [u'Kakashi', u'Naruto',]), (3, [u'Kakashi', u'Naruto', u'Sasuke',]), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',]), ])) def test_equals(self): # test equality filter on list self.assertEquals([entity.names for entity in ListModel.objects.filter(names='Sakura')], [[u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',]]) # test with additonal pk filter (for DBs that have special pk queries) query = ListModel.objects.filter(names='Sakura') self.assertEquals(query.get(pk=query[0].pk).names, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',]) def test_is_null(self): self.assertEquals(ListModel.objects.filter( names__isnull=True).count(), 0) def test_exclude(self): self.assertEquals(dict([(entity.pk, entity.names) for entity in ListModel.objects.all().exclude( names__lt='Sakura')]), dict([(3, [u'Kakashi', u'Naruto', u'Sasuke',]), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura',]), ])) def test_chained_filter(self): self.assertEquals([entity.names for entity in ListModel.objects.filter(names='Sasuke').filter( names='Sakura')], [['Kakashi', 'Naruto', 'Sasuke', 'Sakura'],]) self.assertEquals([entity.names for entity in ListModel.objects.filter(names__startswith='Sa').filter( names='Sakura')], [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']]) # test across multiple columns. 
On app engine only one filter is allowed # to be an inequality filter self.assertEquals([entity.names for entity in ListModel.objects.filter(floating_point=9.1).filter( names__startswith='Sa')], [['Kakashi', 'Naruto', 'Sasuke',],]) def test_setfield(self): setdata = [1, 2, 3, 2, 1] # At the same time test value conversion SetModel(setfield=map(str, setdata)).save() item = SetModel.objects.filter(setfield=3)[0] self.assertEqual(item.setfield, set(setdata)) # This shouldn't raise an error because the default value is # an empty list SetModel().save() @unittest.skipIf(not supports_dicts, "Backend doesn't support dicts") def test_dictfield(self): DictModel(dictfield=dict(a=1, b='55', foo=3.14), auto_now={'a' : None}).save() item = DictModel.objects.get() self.assertEqual(item.dictfield, {u'a' : 1, u'b' : 55, u'foo' : 3}) dt = item.auto_now['a'] self.assertNotEqual(dt, None) item.save() self.assertGreater(DictModel.objects.get().auto_now['a'], dt) # This shouldn't raise an error becaues the default value is # an empty dict DictModel().save() @unittest.skip('Fails with GAE SDK, but passes on production') def test_Q_objects(self): self.assertEquals([entity.names for entity in ListModel.objects.exclude(Q(names__lt='Sakura') | Q(names__gte='Sasuke'))], [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']]) class BaseModel(models.Model): pass class ExtendedModel(BaseModel): name = models.CharField(max_length=20) class BaseModelProxy(BaseModel): class Meta: proxy = True class ExtendedModelProxy(ExtendedModel): class Meta: proxy = True class ProxyTest(TestCase): def test_proxy(self): list(BaseModelProxy.objects.all()) def test_proxy_with_inheritance(self): self.assertRaises(DatabaseError, lambda: list(ExtendedModelProxy.objects.all())) class EmbeddedModelFieldTest(TestCase): def _simple_instance(self): EmbeddedModelFieldModel.objects.create(simple=EmbeddedModel(someint='5')) return EmbeddedModelFieldModel.objects.get() def test_simple(self): instance = self._simple_instance() self.assertIsInstance(instance.simple, EmbeddedModel) # Make sure get_prep_value is called: self.assertEqual(instance.simple.someint, 5) # AutoFields' values should not be populated: self.assertEqual(instance.simple.id, None) def test_pre_save(self): # Make sure field.pre_save is called instance = self._simple_instance() self.assertNotEqual(instance.simple.auto_now, None) self.assertNotEqual(instance.simple.auto_now_add, None) auto_now = instance.simple.auto_now auto_now_add = instance.simple.auto_now_add instance.save() instance = EmbeddedModelFieldModel.objects.get() # auto_now_add shouldn't have changed now, but auto_now should. 
self.assertEqual(instance.simple.auto_now_add, auto_now_add) self.assertGreater(instance.simple.auto_now, auto_now) def test_error_messages(self): for kwargs in ( {'simple_untyped' : 42}, {'simple' : 42} ): self.assertRaisesRegexp(TypeError, "Expected instance of type", EmbeddedModelFieldModel(**kwargs).save) def test_typed_listfield(self): EmbeddedModelFieldModel.objects.create( typed_list=[SetModel(setfield=range(3)), SetModel(setfield=range(9))], ordered_list=[Target(index=i) for i in xrange(5, 0, -1)] ) obj = EmbeddedModelFieldModel.objects.get() self.assertIn(5, obj.typed_list[1].setfield) self.assertEqual([target.index for target in obj.ordered_list], range(1, 6)) def test_untyped_listfield(self): EmbeddedModelFieldModel.objects.create(untyped_list=[ EmbeddedModel(someint=7), OrderedListModel(ordered_ints=range(5, 0, -1)), SetModel(setfield=[1, 2, 2, 3]) ]) instances = EmbeddedModelFieldModel.objects.get().untyped_list for instance, cls in zip(instances, [EmbeddedModel, OrderedListModel, SetModel]): self.assertIsInstance(instance, cls) self.assertNotEqual(instances[0].auto_now, None) self.assertEqual(instances[1].ordered_ints, range(1, 6)) def test_untyped_dict(self): EmbeddedModelFieldModel.objects.create(untyped_dict={ 'a' : SetModel(setfield=range(3)), 'b' : DictModel(dictfield={'a' : 1, 'b' : 2}), 'c' : DictModel(dictfield={}, auto_now={'y' : 1}) }) data = EmbeddedModelFieldModel.objects.get().untyped_dict self.assertIsInstance(data['a'], SetModel) self.assertNotEqual(data['c'].auto_now['y'], None) def test_foreignkey_in_embedded_object(self): simple = EmbeddedModel(some_relation=DictModel.objects.create()) obj = EmbeddedModelFieldModel.objects.create(simple=simple) simple = EmbeddedModelFieldModel.objects.get().simple self.assertNotIn('some_relation', simple.__dict__) self.assertIsInstance(simple.__dict__['some_relation_id'], type(obj.id)) self.assertIsInstance(simple.some_relation, DictModel) EmbeddedModelFieldTest = unittest.skipIf( not supports_dicts, "Backend doesn't support dicts")( EmbeddedModelFieldTest) class SignalTest(TestCase): def test_post_save(self): created = [] @receiver(post_save, sender=SetModel) def handle(**kwargs): created.append(kwargs['created']) SetModel().save() self.assertEqual(created, [True]) SetModel.objects.get().save() self.assertEqual(created, [True, False]) qs = SetModel.objects.all() list(qs)[0].save() self.assertEqual(created, [True, False, False]) list(qs)[0].save() self.assertEqual(created, [True, False, False, False]) list(qs.select_related())[0].save() self.assertEqual(created, [True, False, False, False, False]) class SelectRelatedTest(TestCase): def test_select_related(self): target = Target(index=5) target.save() Source(target=target, index=8).save() source = Source.objects.all().select_related()[0] self.assertEqual(source.target.pk, target.pk) self.assertEqual(source.target.index, target.index) source = Source.objects.all().select_related('target')[0] self.assertEqual(source.target.pk, target.pk) self.assertEqual(source.target.index, target.index)
bsd-3-clause
-1,283,027,664,947,976,000
42.153153
93
0.603897
false
wwright2/dcim3-angstrom1
sources/bitbake/lib/bb/fetch2/perforce.py
3
5987
# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- """ BitBake 'Fetch' implementations Classes for obtaining upstream sources for the BitBake build tools. """ # Copyright (C) 2003, 2004 Chris Larson # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Based on functions from the base bb module, Copyright 2003 Holger Schurig from future_builtins import zip import os import subprocess import logging import bb from bb import data from bb.fetch2 import FetchMethod from bb.fetch2 import FetchError from bb.fetch2 import logger from bb.fetch2 import runfetchcmd class Perforce(FetchMethod): def supports(self, ud, d): return ud.type in ['p4'] def doparse(url, d): parm = {} path = url.split("://")[1] delim = path.find("@"); if delim != -1: (user, pswd, host, port) = path.split('@')[0].split(":") path = path.split('@')[1] else: (host, port) = d.getVar('P4PORT').split(':') user = "" pswd = "" if path.find(";") != -1: keys=[] values=[] plist = path.split(';') for item in plist: if item.count('='): (key, value) = item.split('=') keys.append(key) values.append(value) parm = dict(zip(keys, values)) path = "//" + path.split(';')[0] host += ":%s" % (port) parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm) return host, path, user, pswd, parm doparse = staticmethod(doparse) def getcset(d, depot, host, user, pswd, parm): p4opt = "" if "cset" in parm: return parm["cset"]; if user: p4opt += " -u %s" % (user) if pswd: p4opt += " -P %s" % (pswd) if host: p4opt += " -p %s" % (host) p4date = d.getVar("P4DATE", True) if "revision" in parm: depot += "#%s" % (parm["revision"]) elif "label" in parm: depot += "@%s" % (parm["label"]) elif p4date: depot += "@%s" % (p4date) p4cmd = d.getVar('FETCHCMD_p4', True) or "p4" logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot) p4file, errors = bb.process.run("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot)) cset = p4file.strip() logger.debug(1, "READ %s", cset) if not cset: return -1 return cset.split(' ')[1] getcset = staticmethod(getcset) def urldata_init(self, ud, d): (host, path, user, pswd, parm) = Perforce.doparse(ud.url, d) base_path = path.replace('/...', '') base_path = self._strip_leading_slashes(base_path) if "label" in parm: version = parm["label"] else: version = Perforce.getcset(d, path, host, user, pswd, parm) ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base_path.replace('/', '.'), version), d) def download(self, ud, d): """ Fetch urls """ (host, depot, user, pswd, parm) = Perforce.doparse(ud.url, d) if depot.find('/...') != -1: path = depot[:depot.find('/...')] else: path = depot module = parm.get('module', os.path.basename(path)) # Get the p4 command p4opt = "" if user: p4opt += " -u %s" % (user) if pswd: p4opt += " -P %s" % (pswd) if host: p4opt += " -p %s" % (host) p4cmd = d.getVar('FETCHCMD_p4', True) or "p4" # create temp directory logger.debug(2, "Fetch: creating temporary directory") 
bb.utils.mkdirhier(d.expand('${WORKDIR}'))
        mktemp = d.getVar("FETCHCMD_p4mktemp", True) or d.expand("mktemp -d -q '${WORKDIR}/oep4.XXXXXX'")
        tmpfile, errors = bb.process.run(mktemp)
        tmpfile = tmpfile.strip()
        if not tmpfile:
            raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", ud.url)

        if "label" in parm:
            depot = "%s@%s" % (depot, parm["label"])
        else:
            cset = Perforce.getcset(d, depot, host, user, pswd, parm)
            depot = "%s@%s" % (depot, cset)

        os.chdir(tmpfile)
        logger.info("Fetch " + ud.url)
        logger.info("%s%s files %s", p4cmd, p4opt, depot)
        p4file, errors = bb.process.run("%s%s files %s" % (p4cmd, p4opt, depot))
        p4file = [f.rstrip() for f in p4file.splitlines()]

        if not p4file:
            raise FetchError("Fetch: unable to get the P4 files from %s" % depot, ud.url)

        count = 0

        for file in p4file:
            list = file.split()

            if list[2] == "delete":
                continue

            dest = list[0][len(path)+1:]
            where = dest.find("#")

            subprocess.call("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]), shell=True)
            count = count + 1

        if count == 0:
            logger.error("Fetch: No files gathered from the P4 fetch")
            raise FetchError("Fetch: No files gathered from the P4 fetch", ud.url)

        runfetchcmd("tar -czf %s %s" % (ud.localpath, module), d, cleanup = [ud.localpath])

        # cleanup
        bb.utils.prunedir(tmpfile)
mit
8,683,616,657,613,537,000
31.016043
120
0.540838
false
gkudos/qgis-cartodb
cartodb/simplejson/tests/test_float.py
141
1430
import math from unittest import TestCase from simplejson.compat import long_type, text_type import simplejson as json from simplejson.decoder import NaN, PosInf, NegInf class TestFloat(TestCase): def test_degenerates_allow(self): for inf in (PosInf, NegInf): self.assertEqual(json.loads(json.dumps(inf)), inf) # Python 2.5 doesn't have math.isnan nan = json.loads(json.dumps(NaN)) self.assertTrue((0 + nan) != nan) def test_degenerates_ignore(self): for f in (PosInf, NegInf, NaN): self.assertEqual(json.loads(json.dumps(f, ignore_nan=True)), None) def test_degenerates_deny(self): for f in (PosInf, NegInf, NaN): self.assertRaises(ValueError, json.dumps, f, allow_nan=False) def test_floats(self): for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100, 3.1]: self.assertEqual(float(json.dumps(num)), num) self.assertEqual(json.loads(json.dumps(num)), num) self.assertEqual(json.loads(text_type(json.dumps(num))), num) def test_ints(self): for num in [1, long_type(1), 1<<32, 1<<64]: self.assertEqual(json.dumps(num), str(num)) self.assertEqual(int(json.dumps(num)), num) self.assertEqual(json.loads(json.dumps(num)), num) self.assertEqual(json.loads(text_type(json.dumps(num))), num)
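

# --- Hypothetical usage sketch (not part of the original file) ---
# Reproduces, outside the TestCase, the round-trip behaviour asserted above;
# it relies only on the module-level imports of this file (json is simplejson,
# PosInf comes from simplejson.decoder).
if __name__ == '__main__':
    assert json.loads(json.dumps(3.1)) == 3.1   # floats round-trip exactly
    assert json.dumps(PosInf) == 'Infinity'     # degenerate values allowed by default
    print('float round-trip checks passed')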
gpl-2.0
-5,228,616,375,632,317,000
39.857143
78
0.627972
false
StackStorm/st2
st2common/tests/unit/test_unit_testing_mocks.py
3
4989
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import unittest2 from st2tests.base import BaseSensorTestCase from st2tests.mocks.sensor import MockSensorWrapper from st2tests.mocks.sensor import MockSensorService from st2tests.mocks.action import MockActionWrapper from st2tests.mocks.action import MockActionService __all__ = [ "BaseSensorTestCaseTestCase", "MockSensorServiceTestCase", "MockActionServiceTestCase", ] class MockSensorClass(object): pass class BaseMockResourceServiceTestCase(object): class TestCase(unittest2.TestCase): def test_get_user_info(self): result = self.mock_service.get_user_info() self.assertEqual(result["username"], "admin") self.assertEqual(result["rbac"]["roles"], ["admin"]) def test_list_set_get_delete_values(self): # list_values, set_value result = self.mock_service.list_values() self.assertSequenceEqual(result, []) self.mock_service.set_value(name="t1.local", value="test1", local=True) self.mock_service.set_value(name="t1.global", value="test1", local=False) result = self.mock_service.list_values(local=True) self.assertEqual(len(result), 1) self.assertEqual(result[0].name, "dummy.test:t1.local") result = self.mock_service.list_values(local=False) self.assertEqual(result[0].name, "dummy.test:t1.local") self.assertEqual(result[1].name, "t1.global") self.assertEqual(len(result), 2) # get_value self.assertEqual(self.mock_service.get_value("inexistent"), None) self.assertEqual( self.mock_service.get_value(name="t1.local", local=True), "test1" ) # delete_value self.assertEqual(len(self.mock_service.list_values(local=True)), 1) self.assertEqual(self.mock_service.delete_value("inexistent"), False) self.assertEqual(len(self.mock_service.list_values(local=True)), 1) self.assertEqual(self.mock_service.delete_value("t1.local"), True) self.assertEqual(len(self.mock_service.list_values(local=True)), 0) class BaseSensorTestCaseTestCase(BaseSensorTestCase): sensor_cls = MockSensorClass def test_dispatch_and_assertTriggerDispatched(self): sensor_service = self.sensor_service expected_msg = 'Trigger "nope" hasn\'t been dispatched' self.assertRaisesRegexp( AssertionError, expected_msg, self.assertTriggerDispatched, trigger="nope" ) sensor_service.dispatch(trigger="test1", payload={"a": "b"}) result = self.assertTriggerDispatched(trigger="test1") self.assertTrue(result) result = self.assertTriggerDispatched(trigger="test1", payload={"a": "b"}) self.assertTrue(result) expected_msg = 'Trigger "test1" hasn\'t been dispatched' self.assertRaisesRegexp( AssertionError, expected_msg, self.assertTriggerDispatched, trigger="test1", payload={"a": "c"}, ) class MockSensorServiceTestCase(BaseMockResourceServiceTestCase.TestCase): def setUp(self): mock_sensor_wrapper = MockSensorWrapper(pack="dummy", class_name="test") self.mock_service = MockSensorService(sensor_wrapper=mock_sensor_wrapper) def test_get_logger(self): sensor_service = self.mock_service logger = 
sensor_service.get_logger("test") logger.info("test info") logger.debug("test debug") self.assertEqual(len(logger.method_calls), 2) method_name, method_args, method_kwargs = tuple(logger.method_calls[0]) self.assertEqual(method_name, "info") self.assertEqual(method_args, ("test info",)) self.assertEqual(method_kwargs, {}) method_name, method_args, method_kwargs = tuple(logger.method_calls[1]) self.assertEqual(method_name, "debug") self.assertEqual(method_args, ("test debug",)) self.assertEqual(method_kwargs, {}) class MockActionServiceTestCase(BaseMockResourceServiceTestCase.TestCase): def setUp(self): mock_action_wrapper = MockActionWrapper(pack="dummy", class_name="test") self.mock_service = MockActionService(action_wrapper=mock_action_wrapper)
apache-2.0
7,089,959,499,132,218,000
37.976563
86
0.67268
false
CoherentLabs/depot_tools
third_party/pylint/checkers/stdlib.py
1
2665
# Copyright 2012 Google Inc. # # http://www.logilab.fr/ -- mailto:[email protected] # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Checkers for various standard library functions.""" import re import sys import astroid from pylint.interfaces import IAstroidChecker from pylint.checkers import BaseChecker from pylint.checkers import utils _VALID_OPEN_MODE_REGEX = re.compile(r'^(r?U|[rwa]\+?b?)$') if sys.version_info >= (3, 0): OPEN_MODULE = '_io' else: OPEN_MODULE = '__builtin__' class OpenModeChecker(BaseChecker): __implements__ = (IAstroidChecker,) name = 'open_mode' msgs = { 'W1501': ('"%s" is not a valid mode for open.', 'bad-open-mode', 'Python supports: r, w, a modes with b, +, and U options. ' 'See http://docs.python.org/2/library/functions.html#open'), } @utils.check_messages('bad-open-mode') def visit_callfunc(self, node): """Visit a CallFunc node.""" if hasattr(node, 'func'): infer = utils.safe_infer(node.func) if infer and infer.root().name == OPEN_MODULE: if getattr(node.func, 'name', None) in ('open', 'file'): self._check_open_mode(node) def _check_open_mode(self, node): """Check that the mode argument of an open or file call is valid.""" try: mode_arg = utils.get_argument_from_call(node, position=1, keyword='mode') if mode_arg: mode_arg = utils.safe_infer(mode_arg) if (isinstance(mode_arg, astroid.Const) and not _VALID_OPEN_MODE_REGEX.match(mode_arg.value)): self.add_message('bad-open-mode', node=node, args=(mode_arg.value)) except (utils.NoSuchArgumentError, TypeError): pass def register(linter): """required method to auto register this checker """ linter.register_checker(OpenModeChecker(linter))
bsd-3-clause
-1,799,978,615,859,561,500
37.071429
85
0.635272
false
lseyesl/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/urls_unittest.py
124
4004
# Copyright (C) 2012 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest2 as unittest from .urls import parse_bug_id, parse_attachment_id class URLsTest(unittest.TestCase): def test_parse_bug_id(self): # FIXME: These would be all better as doctests self.assertEqual(12345, parse_bug_id("http://webkit.org/b/12345")) self.assertEqual(12345, parse_bug_id("foo\n\nhttp://webkit.org/b/12345\nbar\n\n")) self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345")) self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345&ctype=xml")) self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345&ctype=xml&excludefield=attachmentdata")) self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345excludefield=attachmentdata&ctype=xml")) # Our url parser is super-fragile, but at least we're testing it. 
self.assertIsNone(parse_bug_id("http://www.webkit.org/b/12345")) self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345")) self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345&excludefield=attachmentdata")) self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&excludefield=attachmentdata&id=12345")) self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&ctype=xml&id=12345")) self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&id=12345&ctype=xml")) def test_parse_attachment_id(self): self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=review")) self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=edit")) self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=prettypatch")) self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=diff")) # Direct attachment links are hosted from per-bug subdomains: self.assertEqual(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345")) # Make sure secure attachment URLs work too. self.assertEqual(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345&t=Bqnsdkl9fs"))
bsd-3-clause
9,005,511,434,208,433,000
64.639344
133
0.745255
false
flh/odoo
openerp/report/misc.py
458
1425
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from pychart import * colorline = [color.T(r=((r+3) % 11)/10.0, g=((g+6) % 11)/10.0, b=((b+9) % 11)/10.0) for r in range(11) for g in range(11) for b in range(11)] def choice_colors(n): if n: return colorline[0:-1:len(colorline)/n] return [] if __name__=='__main__': print choice_colors(10) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
-2,599,303,623,539,803,600
36.5
79
0.574035
false
tensorflow/datasets
tensorflow_datasets/image/coil100.py
1
3345
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dataset class for COIL-100 dataset."""

import os

import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds

_URL = "http://www.cs.columbia.edu/CAVE/databases/SLAM_coil-20_coil-100/coil-100/coil-100.zip"
_DESCRIPTION = ("""The dataset contains 7200 color images of 100 objects
(72 images per object). The objects have a wide variety of complex geometric
and reflectance characteristics. The objects were placed on a motorized
turntable against a black background. The turntable was rotated through 360
degrees to vary object pose with respect to a fixed color camera. Images of
the objects were taken at pose intervals of 5 degrees. This corresponds to
72 poses per object""")
_ANGLE_LABELS = [str(x) for x in range(0, 360, 5)]
_OBJECT_IDS = [f"obj{str(x)}" for x in range(1, 101)]
_IMAGE_SHAPE = (128, 128, 3)
_CITATION = """\
@article{nene1996columbia,
  title={Columbia object image library (coil-20)},
  author={Nene, Sameer A and Nayar, Shree K and Murase, Hiroshi and others},
  year={1996},
  publisher={Technical report CUCS-005-96}
}
"""


class Coil100(tfds.core.GeneratorBasedBuilder):
  """COIL-100 Image Dataset Class."""

  VERSION = tfds.core.Version("2.0.0")
  RELEASE_NOTES = {
      "2.0.0": "Change features (`object_id` is now `ClassLabel`, rename "
               "`label` -> `angle_label`, add `angle`)",
      "1.0.0": "Initial release",
  }

  def _info(self):
    """Define Dataset Info."""
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            "image": tfds.features.Image(shape=_IMAGE_SHAPE),
            "angle_label": tfds.features.ClassLabel(names=_ANGLE_LABELS),
            "object_id": tfds.features.ClassLabel(names=_OBJECT_IDS),
            "angle": tf.int64,
        }),
        supervised_keys=("image", "angle_label"),
        homepage="http://www.cs.columbia.edu/CAVE/software/softlib/coil-100.php",
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Define Splits."""
    path = dl_manager.download_and_extract(_URL)

    return {
        tfds.Split.TRAIN: self._generate_examples(path / "coil-100"),
    }

  def _generate_examples(self, data_dir_path):
    """Generate images and labels for splits."""
    for file_name in tf.io.gfile.listdir(data_dir_path):
      if file_name.endswith(".png"):
        image = os.path.join(data_dir_path, file_name)
        angle_label = file_name.split("_")[2].split(".")[0]
        object_id = file_name.split("_")[0]
        yield file_name, {
            "image": image,
            "angle_label": angle_label,
            "object_id": object_id,
            "angle": int(angle_label),
        }
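

# --- Hypothetical usage sketch (not part of the original file) ---
# Typical consumption path through the public TFDS API; the registered name
# "coil100" is assumed to be derived from the builder class above, and the
# call will download and prepare the dataset on first use.
if __name__ == "__main__":
  ds = tfds.load("coil100", split="train")
  for example in ds.take(1):
    print(example["object_id"], example["angle"], example["image"].shape)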
apache-2.0
-2,295,583,092,356,631,000
34.967742
109
0.667265
false
cristiana214/cristianachavez214-cristianachavez
python/src/Lib/ctypes/macholib/dyld.py
253
5341
###################################################################### # This file should be kept compatible with Python 2.3, see PEP 291. # ###################################################################### """ dyld emulation """ import os from framework import framework_info from dylib import dylib_info from itertools import * __all__ = [ 'dyld_find', 'framework_find', 'framework_info', 'dylib_info', ] # These are the defaults as per man dyld(1) # DEFAULT_FRAMEWORK_FALLBACK = [ os.path.expanduser("~/Library/Frameworks"), "/Library/Frameworks", "/Network/Library/Frameworks", "/System/Library/Frameworks", ] DEFAULT_LIBRARY_FALLBACK = [ os.path.expanduser("~/lib"), "/usr/local/lib", "/lib", "/usr/lib", ] def ensure_utf8(s): """Not all of PyObjC and Python understand unicode paths very well yet""" if isinstance(s, unicode): return s.encode('utf8') return s def dyld_env(env, var): if env is None: env = os.environ rval = env.get(var) if rval is None: return [] return rval.split(':') def dyld_image_suffix(env=None): if env is None: env = os.environ return env.get('DYLD_IMAGE_SUFFIX') def dyld_framework_path(env=None): return dyld_env(env, 'DYLD_FRAMEWORK_PATH') def dyld_library_path(env=None): return dyld_env(env, 'DYLD_LIBRARY_PATH') def dyld_fallback_framework_path(env=None): return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH') def dyld_fallback_library_path(env=None): return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH') def dyld_image_suffix_search(iterator, env=None): """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics""" suffix = dyld_image_suffix(env) if suffix is None: return iterator def _inject(iterator=iterator, suffix=suffix): for path in iterator: if path.endswith('.dylib'): yield path[:-len('.dylib')] + suffix + '.dylib' else: yield path + suffix yield path return _inject() def dyld_override_search(name, env=None): # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a # framework name, use the first file that exists in the framework # path if any. If there is none go on to search the DYLD_LIBRARY_PATH # if any. framework = framework_info(name) if framework is not None: for path in dyld_framework_path(env): yield os.path.join(path, framework['name']) # If DYLD_LIBRARY_PATH is set then use the first file that exists # in the path. If none use the original name. for path in dyld_library_path(env): yield os.path.join(path, os.path.basename(name)) def dyld_executable_path_search(name, executable_path=None): # If we haven't done any searching and found a library and the # dylib_name starts with "@executable_path/" then construct the # library name. 
if name.startswith('@executable_path/') and executable_path is not None: yield os.path.join(executable_path, name[len('@executable_path/'):]) def dyld_default_search(name, env=None): yield name framework = framework_info(name) if framework is not None: fallback_framework_path = dyld_fallback_framework_path(env) for path in fallback_framework_path: yield os.path.join(path, framework['name']) fallback_library_path = dyld_fallback_library_path(env) for path in fallback_library_path: yield os.path.join(path, os.path.basename(name)) if framework is not None and not fallback_framework_path: for path in DEFAULT_FRAMEWORK_FALLBACK: yield os.path.join(path, framework['name']) if not fallback_library_path: for path in DEFAULT_LIBRARY_FALLBACK: yield os.path.join(path, os.path.basename(name)) def dyld_find(name, executable_path=None, env=None): """ Find a library or framework using dyld semantics """ name = ensure_utf8(name) executable_path = ensure_utf8(executable_path) for path in dyld_image_suffix_search(chain( dyld_override_search(name, env), dyld_executable_path_search(name, executable_path), dyld_default_search(name, env), ), env): if os.path.isfile(path): return path raise ValueError("dylib %s could not be found" % (name,)) def framework_find(fn, executable_path=None, env=None): """ Find a framework using dyld semantics in a very loose manner. Will take input such as: Python Python.framework Python.framework/Versions/Current """ try: return dyld_find(fn, executable_path=executable_path, env=env) except ValueError, e: pass fmwk_index = fn.rfind('.framework') if fmwk_index == -1: fmwk_index = len(fn) fn += '.framework' fn = os.path.join(fn, os.path.basename(fn[:fmwk_index])) try: return dyld_find(fn, executable_path=executable_path, env=env) except ValueError: raise e def test_dyld_find(): env = {} assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib' assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System' if __name__ == '__main__': test_dyld_find()
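

# --- Hypothetical usage sketch (not part of the original file) ---
# Shows the typical call pattern on a macOS host; the resolved paths depend on
# the local system, so the commented results are illustrative only.
def _demo_lookups():
    print(framework_find('Python'))        # e.g. /System/Library/Frameworks/Python.framework/Python
    print(dyld_find('libSystem.dylib'))    # e.g. /usr/lib/libSystem.dylib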
apache-2.0
-1,562,955,628,579,466,800
30.60355
103
0.628347
false
WillieMaddox/numpy
numpy/lib/tests/test_index_tricks.py
91
11462
from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises ) from numpy.lib.index_tricks import ( mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, index_exp, ndindex, r_, s_, ix_ ) class TestRavelUnravelIndex(TestCase): def test_basic(self): assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) assert_raises(ValueError, np.unravel_index, -1, (2, 2)) assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) assert_raises(ValueError, np.unravel_index, 4, (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) assert_equal( np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) arr = np.array([[3, 6, 6], [4, 5, 1]]) assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) assert_equal( np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) assert_equal( np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), [12, 13, 13]) assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), [[3, 6, 6], [4, 5, 1]]) assert_equal( np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), [[3, 6, 6], [4, 5, 1]]) assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) def test_dtypes(self): # Test with different data types for dtype in [np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64]: coords = np.array( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) shape = (5, 8) uncoords = 8*coords[0]+coords[1] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) uncoords = coords[0]+5*coords[1] assert_equal( np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) coords = np.array( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], dtype=dtype) shape = (5, 8, 10) uncoords = 10*(8*coords[0]+coords[1])+coords[2] assert_equal(np.ravel_multi_index(coords, shape), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape)) uncoords = coords[0]+5*(coords[1]+8*coords[2]) assert_equal( np.ravel_multi_index(coords, shape, order='F'), uncoords) assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) def test_clipmodes(self): # Test clipmodes assert_equal( np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode=( 'wrap', 'raise', 'clip', 'raise')), np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) assert_raises( ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) class TestGrid(TestCase): def test_basic(self): a = mgrid[-1:1:10j] b = mgrid[-1:1:0.1] assert_(a.shape == (10,)) assert_(b.shape == (20,)) assert_(a[0] == -1) 
assert_almost_equal(a[-1], 1) assert_(b[0] == -1) assert_almost_equal(b[1]-b[0], 0.1, 11) assert_almost_equal(b[-1], b[0]+19*0.1, 11) assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) def test_linspace_equivalence(self): y, st = np.linspace(2, 10, retstep=1) assert_almost_equal(st, 8/49.0) assert_array_almost_equal(y, mgrid[2:10:50j], 13) def test_nd(self): c = mgrid[-1:1:10j, -2:2:10j] d = mgrid[-1:1:0.1, -2:2:0.2] assert_(c.shape == (2, 10, 10)) assert_(d.shape == (2, 20, 20)) assert_array_equal(c[0][0, :], -np.ones(10, 'd')) assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd')) assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], 0.1*np.ones(20, 'd'), 11) assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], 0.2*np.ones(20, 'd'), 11) class TestConcatenator(TestCase): def test_1d(self): assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) b = np.ones(5) c = r_[b, 0, 0, b] assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) def test_mixed_type(self): g = r_[10.1, 1:10] assert_(g.dtype == 'f8') def test_more_mixed_type(self): g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] assert_(g.dtype == 'f8') def test_2d(self): b = np.random.rand(5, 5) c = np.random.rand(5, 5) d = r_['1', b, c] # append columns assert_(d.shape == (5, 10)) assert_array_equal(d[:, :5], b) assert_array_equal(d[:, 5:], c) d = r_[b, c] assert_(d.shape == (10, 5)) assert_array_equal(d[:5, :], b) assert_array_equal(d[5:, :], c) class TestNdenumerate(TestCase): def test_basic(self): a = np.array([[1, 2], [3, 4]]) assert_equal(list(ndenumerate(a)), [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) class TestIndexExpression(TestCase): def test_regression_1(self): # ticket #1196 a = np.arange(2) assert_equal(a[:-1], a[s_[:-1]]) assert_equal(a[:-1], a[index_exp[:-1]]) def test_simple_1(self): a = np.random.rand(4, 5, 6) assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) class TestIx_(TestCase): def test_regression_1(self): # Test empty inputs create ouputs of indexing type, gh-5804 # Test both lists and arrays for func in (range, np.arange): a, = np.ix_(func(0)) assert_equal(a.dtype, np.intp) def test_shape_and_dtype(self): sizes = (4, 5, 3, 2) # Test both lists and arrays for func in (range, np.arange): arrays = np.ix_(*[func(sz) for sz in sizes]) for k, (a, sz) in enumerate(zip(arrays, sizes)): assert_equal(a.shape[k], sz) assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) assert_(np.issubdtype(a.dtype, int)) def test_bool(self): bool_a = [True, False, True, True] int_a, = np.nonzero(bool_a) assert_equal(np.ix_(bool_a)[0], int_a) def test_1d_only(self): idx2d = [[1, 2, 3], [4, 5, 6]] assert_raises(ValueError, np.ix_, idx2d) def test_repeated_input(self): length_of_vector = 5 x = np.arange(length_of_vector) out = ix_(x, x) assert_equal(out[0].shape, (length_of_vector, 1)) assert_equal(out[1].shape, (1, length_of_vector)) # check that input shape is not modified assert_equal(x.shape, (length_of_vector,)) def test_c_(): a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) def test_fill_diagonal(): a = np.zeros((3, 3), int) fill_diagonal(a, 5) yield (assert_array_equal, a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5]])) #Test tall matrix a = np.zeros((10, 3), int) fill_diagonal(a, 5) yield (assert_array_equal, a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 
0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]])) #Test tall matrix wrap a = np.zeros((10, 3), int) fill_diagonal(a, 5, True) yield (assert_array_equal, a, np.array([[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0], [5, 0, 0], [0, 5, 0]])) #Test wide matrix a = np.zeros((3, 10), int) fill_diagonal(a, 5) yield (assert_array_equal, a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])) # The same function can operate on a 4-d array: a = np.zeros((3, 3, 3, 3), int) fill_diagonal(a, 4) i = np.array([0, 1, 2]) yield (assert_equal, np.where(a != 0), (i, i, i, i)) def test_diag_indices(): di = diag_indices(4) a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) a[di] = 100 yield (assert_array_equal, a, np.array([[100, 2, 3, 4], [5, 100, 7, 8], [9, 10, 100, 12], [13, 14, 15, 100]])) # Now, we create indices to manipulate a 3-d array: d3 = diag_indices(2, 3) # And use it to set the diagonal of a zeros array to 1: a = np.zeros((2, 2, 2), int) a[d3] = 1 yield (assert_array_equal, a, np.array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]])) def test_diag_indices_from(): x = np.random.random((4, 4)) r, c = diag_indices_from(x) assert_array_equal(r, np.arange(4)) assert_array_equal(c, np.arange(4)) def test_ndindex(): x = list(ndindex(1, 2, 3)) expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] assert_array_equal(x, expected) x = list(ndindex((1, 2, 3))) assert_array_equal(x, expected) # Test use of scalars and tuples x = list(ndindex((3,))) assert_array_equal(x, list(ndindex(3))) # Make sure size argument is optional x = list(ndindex()) assert_equal(x, [()]) x = list(ndindex(())) assert_equal(x, [()]) # Make sure 0-sized ndindex works correctly x = list(ndindex(*[0])) assert_equal(x, []) if __name__ == "__main__": run_module_suite()
bsd-3-clause
418,206,911,341,197,100
34.159509
81
0.48037
false
mcalhoun/ansible
test/units/plugins/test_plugins.py
99
2943
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible.compat.tests import unittest from ansible.compat.tests import BUILTINS from ansible.compat.tests.mock import mock_open, patch, MagicMock from ansible.plugins import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, PluginLoader class TestErrors(unittest.TestCase): def setUp(self): pass def tearDown(self): pass @patch.object(PluginLoader, '_get_paths') def test_print_paths(self, mock_method): mock_method.return_value = ['/path/one', '/path/two', '/path/three'] pl = PluginLoader('foo', 'foo', '', 'test_plugins') paths = pl.print_paths() expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three']) self.assertEqual(paths, expected_paths) def test_plugins__get_package_paths_no_package(self): pl = PluginLoader('test', '', 'test', 'test_plugin') self.assertEqual(pl._get_package_paths(), []) def test_plugins__get_package_paths_with_package(self): # the _get_package_paths() call uses __import__ to load a # python library, and then uses the __file__ attribute of # the result for that to get the library path, so we mock # that here and patch the builtin to use our mocked result m = MagicMock() m.return_value.__file__ = '/path/to/my/test.py' pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin') with patch('{0}.__import__'.format(BUILTINS), m): self.assertEqual(pl._get_package_paths(), ['/path/to/my/bar/bam']) def test_plugins__get_paths(self): pl = PluginLoader('test', '', 'test', 'test_plugin') pl._paths = ['/path/one', '/path/two'] self.assertEqual(pl._get_paths(), ['/path/one', '/path/two']) # NOT YET WORKING #def fake_glob(path): # if path == 'test/*': # return ['test/foo', 'test/bar', 'test/bam'] # elif path == 'test/*/*' #m._paths = None #mock_glob = MagicMock() #mock_glob.return_value = [] #with patch('glob.glob', mock_glob): # pass
gpl-3.0
-6,563,966,751,058,425,000
37.220779
85
0.639823
false
SamYaple/neutron
neutron/tests/functional/agent/linux/test_process_monitor.py
21
3784
# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from six import moves from neutron.agent.linux import external_process from neutron.agent.linux import utils from neutron.tests import base from neutron.tests.functional.agent.linux import simple_daemon UUID_FORMAT = "test-uuid-%d" SERVICE_NAME = "service" class BaseTestProcessMonitor(base.BaseTestCase): def setUp(self): super(BaseTestProcessMonitor, self).setUp() cfg.CONF.set_override('check_child_processes_interval', 1, 'AGENT') self._child_processes = [] self._process_monitor = None self.create_child_processes_manager('respawn') self.addCleanup(self.cleanup_spawned_children) def create_child_processes_manager(self, action): cfg.CONF.set_override('check_child_processes_action', action, 'AGENT') self._process_monitor = self.build_process_monitor() def build_process_monitor(self): return external_process.ProcessMonitor( config=cfg.CONF, resource_type='test') def _make_cmdline_callback(self, uuid): def _cmdline_callback(pidfile): cmdline = ["python", simple_daemon.__file__, "--uuid=%s" % uuid, "--pid_file=%s" % pidfile] return cmdline return _cmdline_callback def spawn_n_children(self, n, service=None): self._child_processes = [] for child_number in moves.range(n): uuid = self._child_uuid(child_number) _callback = self._make_cmdline_callback(uuid) pm = external_process.ProcessManager( conf=cfg.CONF, uuid=uuid, default_cmd_callback=_callback, service=service) pm.enable() self._process_monitor.register(uuid, SERVICE_NAME, pm) self._child_processes.append(pm) @staticmethod def _child_uuid(child_number): return UUID_FORMAT % child_number def _kill_last_child(self): self._child_processes[-1].disable() def wait_for_all_children_respawned(self): def all_children_active(): return all(pm.active for pm in self._child_processes) for pm in self._child_processes: directory = os.path.dirname(pm.get_pid_file_name()) self.assertEqual(0o755, os.stat(directory).st_mode & 0o777) # we need to allow extra_time for the check process to happen # and properly execute action over the gone processes under # high load conditions max_wait_time = ( cfg.CONF.AGENT.check_child_processes_interval + 5) utils.wait_until_true( all_children_active, timeout=max_wait_time, sleep=0.01, exception=RuntimeError('Not all children respawned.')) def cleanup_spawned_children(self): self._process_monitor.stop() for pm in self._child_processes: pm.disable() class TestProcessMonitor(BaseTestProcessMonitor): def test_respawn_handler(self): self.spawn_n_children(2) self._kill_last_child() self.wait_for_all_children_respawned()
apache-2.0
1,664,293,690,557,467,600
33.715596
78
0.639271
false
cmdunkers/DeeperMind
PythonEnv/lib/python2.7/site-packages/numpy/core/shape_base.py
69
9050
from __future__ import division, absolute_import, print_function __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'vstack', 'hstack', 'stack'] from . import numeric as _nx from .numeric import asanyarray, newaxis def atleast_1d(*arys): """ Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- arys1, arys2, ... : array_like One or more input arrays. Returns ------- ret : ndarray An array, or sequence of arrays, each with ``a.ndim >= 1``. Copies are made only if necessary. See Also -------- atleast_2d, atleast_3d Examples -------- >>> np.atleast_1d(1.0) array([ 1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., 8.]]) >>> np.atleast_1d(x) is x True >>> np.atleast_1d(1, [3, 4]) [array([1]), array([3, 4])] """ res = [] for ary in arys: ary = asanyarray(ary) if len(ary.shape) == 0: result = ary.reshape(1) else: result = ary res.append(result) if len(res) == 1: return res[0] else: return res def atleast_2d(*arys): """ View inputs as arrays with at least two dimensions. Parameters ---------- arys1, arys2, ... : array_like One or more array-like sequences. Non-array inputs are converted to arrays. Arrays that already have two or more dimensions are preserved. Returns ------- res, res2, ... : ndarray An array, or tuple of arrays, each with ``a.ndim >= 2``. Copies are avoided where possible, and views with two or more dimensions are returned. See Also -------- atleast_1d, atleast_3d Examples -------- >>> np.atleast_2d(3.0) array([[ 3.]]) >>> x = np.arange(3.0) >>> np.atleast_2d(x) array([[ 0., 1., 2.]]) >>> np.atleast_2d(x).base is x True >>> np.atleast_2d(1, [1, 2], [[1, 2]]) [array([[1]]), array([[1, 2]]), array([[1, 2]])] """ res = [] for ary in arys: ary = asanyarray(ary) if len(ary.shape) == 0: result = ary.reshape(1, 1) elif len(ary.shape) == 1: result = ary[newaxis,:] else: result = ary res.append(result) if len(res) == 1: return res[0] else: return res def atleast_3d(*arys): """ View inputs as arrays with at least three dimensions. Parameters ---------- arys1, arys2, ... : array_like One or more array-like sequences. Non-array inputs are converted to arrays. Arrays that already have three or more dimensions are preserved. Returns ------- res1, res2, ... : ndarray An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are avoided where possible, and views with three or more dimensions are returned. For example, a 1-D array of shape ``(N,)`` becomes a view of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a view of shape ``(M, N, 1)``. See Also -------- atleast_1d, atleast_2d Examples -------- >>> np.atleast_3d(3.0) array([[[ 3.]]]) >>> x = np.arange(3.0) >>> np.atleast_3d(x).shape (1, 3, 1) >>> x = np.arange(12.0).reshape(4,3) >>> np.atleast_3d(x).shape (4, 3, 1) >>> np.atleast_3d(x).base is x True >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): ... print arr, arr.shape ... [[[1] [2]]] (1, 2, 1) [[[1] [2]]] (1, 2, 1) [[[1 2]]] (1, 1, 2) """ res = [] for ary in arys: ary = asanyarray(ary) if len(ary.shape) == 0: result = ary.reshape(1, 1, 1) elif len(ary.shape) == 1: result = ary[newaxis,:, newaxis] elif len(ary.shape) == 2: result = ary[:,:, newaxis] else: result = ary res.append(result) if len(res) == 1: return res[0] else: return res def vstack(tup): """ Stack arrays in sequence vertically (row wise). 
Take a sequence of arrays and stack them vertically to make a single array. Rebuild arrays divided by `vsplit`. Parameters ---------- tup : sequence of ndarrays Tuple containing arrays to be stacked. The arrays must have the same shape along all but the first axis. Returns ------- stacked : ndarray The array formed by stacking the given arrays. See Also -------- stack : Join a sequence of arrays along a new axis. hstack : Stack arrays in sequence horizontally (column wise). dstack : Stack arrays in sequence depth wise (along third dimension). concatenate : Join a sequence of arrays along an existing axis. vsplit : Split array into a list of multiple sub-arrays vertically. Notes ----- Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that are at least 2-dimensional. Examples -------- >>> a = np.array([1, 2, 3]) >>> b = np.array([2, 3, 4]) >>> np.vstack((a,b)) array([[1, 2, 3], [2, 3, 4]]) >>> a = np.array([[1], [2], [3]]) >>> b = np.array([[2], [3], [4]]) >>> np.vstack((a,b)) array([[1], [2], [3], [2], [3], [4]]) """ return _nx.concatenate([atleast_2d(_m) for _m in tup], 0) def hstack(tup): """ Stack arrays in sequence horizontally (column wise). Take a sequence of arrays and stack them horizontally to make a single array. Rebuild arrays divided by `hsplit`. Parameters ---------- tup : sequence of ndarrays All arrays must have the same shape along all but the second axis. Returns ------- stacked : ndarray The array formed by stacking the given arrays. See Also -------- stack : Join a sequence of arrays along a new axis. vstack : Stack arrays in sequence vertically (row wise). dstack : Stack arrays in sequence depth wise (along third axis). concatenate : Join a sequence of arrays along an existing axis. hsplit : Split array along second axis. Notes ----- Equivalent to ``np.concatenate(tup, axis=1)`` Examples -------- >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.hstack((a,b)) array([1, 2, 3, 2, 3, 4]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[2],[3],[4]]) >>> np.hstack((a,b)) array([[1, 2], [2, 3], [3, 4]]) """ arrs = [atleast_1d(_m) for _m in tup] # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" if arrs[0].ndim == 1: return _nx.concatenate(arrs, 0) else: return _nx.concatenate(arrs, 1) def stack(arrays, axis=0): """ Join a sequence of arrays along a new axis. The `axis` parameter specifies the index of the new axis in the dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. .. versionadded:: 1.10.0 Parameters ---------- arrays : sequence of array_like Each array must have the same shape. axis : int, optional The axis in the result array along which the input arrays are stacked. Returns ------- stacked : ndarray The stacked array has one more dimension than the input arrays. See Also -------- concatenate : Join a sequence of arrays along an existing axis. split : Split array into a list of multiple sub-arrays of equal size. 
Examples -------- >>> arrays = [np.random.randn(3, 4) for _ in range(10)] >>> np.stack(arrays, axis=0).shape (10, 3, 4) >>> np.stack(arrays, axis=1).shape (3, 10, 4) >>> np.stack(arrays, axis=2).shape (3, 4, 10) >>> a = np.array([1, 2, 3]) >>> b = np.array([2, 3, 4]) >>> np.stack((a, b)) array([[1, 2, 3], [2, 3, 4]]) >>> np.stack((a, b), axis=-1) array([[1, 2], [2, 3], [3, 4]]) """ arrays = [asanyarray(arr) for arr in arrays] if not arrays: raise ValueError('need at least one array to stack') shapes = set(arr.shape for arr in arrays) if len(shapes) != 1: raise ValueError('all input arrays must have the same shape') result_ndim = arrays[0].ndim + 1 if not -result_ndim <= axis < result_ndim: msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim) raise IndexError(msg) if axis < 0: axis += result_ndim sl = (slice(None),) * axis + (_nx.newaxis,) expanded_arrays = [arr[sl] for arr in arrays] return _nx.concatenate(expanded_arrays, axis=axis)
bsd-3-clause
2,915,219,051,616,551,400
24.857143
79
0.53558
false
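A quick cross-check of the `stack` implementation in the record above (my sketch, not part of the numpy sources): `stack` builds its result by slicing a new axis into each input and concatenating, so the two constructions below should agree for any valid axis.

# Sketch: verify np.stack against the newaxis-slicing + concatenate
# construction used in shape_base.stack above (requires NumPy >= 1.10).
import numpy as np

arrays = [np.random.randn(3, 4) for _ in range(10)]

for axis in (0, 1, 2):
    sl = (slice(None),) * axis + (np.newaxis,)          # same trick as stack()
    by_hand = np.concatenate([arr[sl] for arr in arrays], axis=axis)
    assert np.array_equal(by_hand, np.stack(arrays, axis=axis))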
joseph-torres/spark
examples/src/main/python/ml/pipeline_example.py
126
2522
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Pipeline Example. """ # $example on$ from pyspark.ml import Pipeline from pyspark.ml.classification import LogisticRegression from pyspark.ml.feature import HashingTF, Tokenizer # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("PipelineExample")\ .getOrCreate() # $example on$ # Prepare training documents from a list of (id, text, label) tuples. training = spark.createDataFrame([ (0, "a b c d e spark", 1.0), (1, "b d", 0.0), (2, "spark f g h", 1.0), (3, "hadoop mapreduce", 0.0) ], ["id", "text", "label"]) # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr. tokenizer = Tokenizer(inputCol="text", outputCol="words") hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features") lr = LogisticRegression(maxIter=10, regParam=0.001) pipeline = Pipeline(stages=[tokenizer, hashingTF, lr]) # Fit the pipeline to training documents. model = pipeline.fit(training) # Prepare test documents, which are unlabeled (id, text) tuples. test = spark.createDataFrame([ (4, "spark i j k"), (5, "l m n"), (6, "spark hadoop spark"), (7, "apache hadoop") ], ["id", "text"]) # Make predictions on test documents and print columns of interest. prediction = model.transform(test) selected = prediction.select("id", "text", "probability", "prediction") for row in selected.collect(): rid, text, prob, prediction = row print("(%d, %s) --> prob=%s, prediction=%f" % (rid, text, str(prob), prediction)) # $example off$ spark.stop()
apache-2.0
-4,230,771,715,801,040,400
35.550725
93
0.672482
false
italomaia/django-allauth
allauth/socialaccount/south_migrations/0007_auto__add_field_socialapp_client_id.py
78
6438
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'SocialApp.client_id' db.add_column('socialaccount_socialapp', 'client_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False) def backwards(self, orm): # Deleting field 'SocialApp.client_id' db.delete_column('socialaccount_socialapp', 'client_id') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 22, 12, 51, 3, 966915)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 22, 12, 51, 3, 966743)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'sites.site': { 'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], 
{'max_length': '50'}) }, 'socialaccount.socialaccount': { 'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'socialaccount.socialapp': { 'Meta': {'object_name': 'SocialApp'}, 'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'}) }, 'socialaccount.socialtoken': { 'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'}, 'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}), 'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}) } } complete_apps = ['socialaccount']
mit
-3,893,418,292,645,992,400
67.489362
182
0.55219
false
neutronpy/neutronpy
tests/test_functions.py
2
1985
# -*- coding: utf-8 -*- r"""Tests special functions """ import numpy as np import pytest from neutronpy import functions from scipy.integrate import simps def test_gauss_norm(): """Test 1d gaussian """ p = np.array([0., 0., 1., -30., 3., 1., 30., 3.]) x = np.linspace(-1e6, 1e6, int(8e6) + 1) y = functions.gaussian(p, x) integ = simps(y, x) assert (abs(integ - 2.) < 1e-5) def test_gauss2d_norm(): """Test 2d gaussian """ p = np.array([0., 0., 1., -3., 0., 0.3, 0.3, 1., 3., 0., 0.3, 0.3]) a, b = np.linspace(-10, 10, 1001), np.linspace(-10, 10, 1001) q = np.meshgrid(a, b, sparse=True) y = functions.gaussian2d(p, q) integ = simps(simps(y, b), a) assert (abs(integ - 2.) < 1e-5) def test_lorent_norm(): """Test 1d lorentzian """ p = np.array([0., 0., 1., -30., 3., 1., 30., 3.]) x = np.linspace(-1e6, 1e6, int(8e6) + 1) y = functions.lorentzian(p, x) integ = simps(y, x) assert (abs(integ - 2.) < 1e-5) def test_voigt_norm(): """Tests voigt function """ p = np.array([0., 0., 1., -30., 2., 3., 1., 30., 2., 3.]) x = np.linspace(-1e6, 1e6, int(8e6) + 1) y = functions.voigt(p, x) integ = simps(y, x) assert (abs(integ - 2.) < 1e-5) def test_gaussring_norm(): """Test gaussian ring """ p = np.array([0., 0., 1., 0., 0., 0.5, 0.5, 0.1]) a, b = np.linspace(-10, 10, 1001), np.linspace(-10, 10, 1001) q = np.meshgrid(a, b, sparse=True) y = functions.gaussian_ring(p, q) integ = simps(simps(y, b), a) assert (abs(integ - 1.) < 1e-5) def test_resolution_norm(): """Tests resolution gaussian """ p = np.array([0., 0., 1., 0., 0., 1.43, 23867.71, 22311.93, 20739.82]) a, b = np.linspace(-1, 1, 501), np.linspace(-1, 1, 501) q = np.meshgrid(a, b, sparse=True) y = functions.resolution(p, q) integ = simps(simps(y, b), a) assert (abs(integ - 1.) < 1e-5) if __name__ == '__main__': pytest.main()
mit
3,487,464,426,806,427,000
25.466667
74
0.531486
false
mohittahiliani/tcp-eval-suite-ns3
src/mesh/doc/source/conf.py
87
7452
# -*- coding: utf-8 -*- # # ns-3 documentation build configuration file, created by # sphinx-quickstart on Tue Dec 14 09:00:39 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.pngmath'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'mesh' # General information about the project. project = u'ns-3' copyright = u'ns-3 project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = 'ns-3-dev' # The full version, including alpha/beta/rc tags. release = 'ns-3-dev' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. #htmlhelp_basename = 'ns-3doc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ # ('mesh-testing', 'mesh-doc-testing.tex', u'Mesh Wi-Fi Testing Documentation', u'ns-3 project', 'manual'), # ('mesh-design', 'mesh-doc-design.tex', u'Mesh Wi-Fi Design Documentation', u'ns-3 project', 'manual'), # ('mesh-user', 'mesh-doc-user.tex', u'Mesh Wi-Fi User Documentation', u'ns-3 project', 'manual'), ('mesh', 'mesh-module-doc.tex', u'The ns-3 Mesh Wi-Fi Module Documentation', u'ns-3 project', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # add page breaks in the pdf. Level 1 is for top-level sections, level 2 for subsections, and so on. 
pdf_break_level = 4 # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ns-3-model-library', u'ns-3 Model Library', [u'ns-3 project'], 1) ]
gpl-2.0
-8,412,465,044,112,385,000
32.41704
108
0.70518
false
titiushko/readthedocs.org
readthedocs/restapi/views/search_views.py
25
7188
import logging from rest_framework import decorators, permissions, status from rest_framework.renderers import JSONPRenderer, JSONRenderer, BrowsableAPIRenderer from rest_framework.response import Response import requests from readthedocs.builds.constants import LATEST from readthedocs.builds.models import Version from readthedocs.search.indexes import PageIndex, ProjectIndex, SectionIndex from readthedocs.projects.models import Project from readthedocs.restapi import utils log = logging.getLogger(__name__) @decorators.api_view(['POST']) @decorators.permission_classes((permissions.IsAdminUser,)) @decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer)) def index_search(request): """ Add things to the search index. """ data = request.DATA['data'] project_pk = data['project_pk'] version_pk = data['version_pk'] commit = data.get('commit') project = Project.objects.get(pk=project_pk) version = Version.objects.get(pk=version_pk) project_scale = 1 page_scale = 1 utils.index_search_request( version=version, page_list=data['page_list'], commit=commit, project_scale=project_scale, page_scale=page_scale) return Response({'indexed': True}) @decorators.api_view(['GET']) @decorators.permission_classes((permissions.AllowAny,)) @decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer)) def search(request): project_slug = request.GET.get('project', None) version_slug = request.GET.get('version', LATEST) query = request.GET.get('q', None) log.debug("(API Search) %s" % query) kwargs = {} body = { "query": { "function_score": { "field_value_factor": {"field": "weight"}, "query": { "bool": { "should": [ {"match": {"title": {"query": query, "boost": 10}}}, {"match": {"headers": {"query": query, "boost": 5}}}, {"match": {"content": {"query": query}}}, ] } } } }, "highlight": { "fields": { "title": {}, "headers": {}, "content": {}, } }, "fields": ["title", "project", "version", "path"], "size": 50 # TODO: Support pagination. } if project_slug: body['filter'] = { "and": [ {"term": {"project": project_slug}}, {"term": {"version": version_slug}}, ] } # Add routing to optimize search by hitting the right shard. kwargs['routing'] = project_slug results = PageIndex().search(body, **kwargs) return Response({'results': results}) @decorators.api_view(['GET']) @decorators.permission_classes((permissions.AllowAny,)) @decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer)) def project_search(request): query = request.GET.get('q', None) log.debug("(API Project Search) %s" % (query)) body = { "query": { "function_score": { "field_value_factor": {"field": "weight"}, "query": { "bool": { "should": [ {"match": {"name": {"query": query, "boost": 10}}}, {"match": {"description": {"query": query}}}, ] } } } }, "fields": ["name", "slug", "description", "lang"] } results = ProjectIndex().search(body) return Response({'results': results}) @decorators.api_view(['GET']) @decorators.permission_classes((permissions.AllowAny,)) @decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer)) def section_search(request): """ Search for a Section of content on Read the Docs. A Section is a subheading on a specific page. Query Thoughts -------------- If you want to search across all documents, just query with a ``q`` GET arg. If you want to filter by a specific project, include a ``project`` GET arg. Facets ------ When you search, you will have a ``project`` facet, which includes the number of matching sections per project. 
When you search inside a project, the ``path`` facet will show the number of matching sections per page. Possible GET args ----------------- * q - The query string **Required** * project - A project slug *Optional* * version - A version slug *Optional* * path - A file path slug *Optional* Example ------- GET /api/v2/search/section/?q=virtualenv&project=django Current Query ------------- """ query = request.GET.get('q', None) if not query: return Response( {'error': 'Search term required. Use the "q" GET arg to search. '}, status=status.HTTP_400_BAD_REQUEST) project_slug = request.GET.get('project', None) version_slug = request.GET.get('version', LATEST) path_slug = request.GET.get('path', None) log.debug("(API Section Search) [%s:%s] %s" % (project_slug, version_slug, query)) kwargs = {} body = { "query": { "function_score": { "field_value_factor": {"field": "weight"}, "query": { "bool": { "should": [ {"match": {"title": {"query": query, "boost": 10}}}, {"match": {"content": {"query": query}}}, ] } } } }, "facets": { "project": { "terms": {"field": "project"}, "facet_filter": { "term": {"version": version_slug}, } }, }, "highlight": { "fields": { "title": {}, "content": {}, } }, "fields": ["title", "project", "version", "path", "page_id", "content"], "size": 10 # TODO: Support pagination. } if project_slug: body['filter'] = { "and": [ {"term": {"project": project_slug}}, {"term": {"version": version_slug}}, ] } body['facets']['path'] = { "terms": {"field": "path"}, "facet_filter": { "term": {"project": project_slug}, } }, # Add routing to optimize search by hitting the right shard. kwargs['routing'] = project_slug if path_slug: body['filter'] = { "and": [ {"term": {"path": path_slug}}, ] } if path_slug and not project_slug: # Show facets when we only have a path body['facets']['path'] = { "terms": {"field": "path"} } results = SectionIndex().search(body, **kwargs) return Response({'results': results})
mit
-7,332,622,690,231,866,000
29.587234
86
0.516973
false
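The `section_search` view in the record above documents its GET parameters in its docstring; the snippet below is a hypothetical client-side call built from that docstring's `/api/v2/search/section/?q=virtualenv&project=django` example. The host name is an assumption, not something defined in the module.

# Hypothetical client for the section_search endpoint documented above.
import requests

BASE_URL = "https://readthedocs.org"  # assumed host; the module only defines the view

resp = requests.get(
    BASE_URL + "/api/v2/search/section/",
    params={"q": "virtualenv", "project": "django"},  # "q" is required
)
resp.raise_for_status()
for hit in resp.json()["results"]:    # the view returns {'results': ...}
    print(hit)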
x303597316/hue
desktop/libs/libsaml/src/libsaml/conf.py
25
4857
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import subprocess from django.utils.translation import ugettext_lazy as _t, ugettext as _ from desktop.lib.conf import Config, coerce_bool, coerce_csv BASEDIR = os.path.dirname(os.path.abspath(__file__)) USERNAME_SOURCES = ('attributes', 'nameid') def xmlsec(): """ xmlsec path """ try: proc = subprocess.Popen(['which', 'xmlsec1'], stdout=subprocess.PIPE) return proc.stdout.read().strip() except subprocess.CalledProcessError: return '/usr/local/bin/xmlsec1' def dict_list_map(value): if isinstance(value, str): d = {} for k, v in json.loads(value).iteritems(): d[k] = (v,) return d elif isinstance(value, dict): return value return None XMLSEC_BINARY = Config( key="xmlsec_binary", dynamic_default=xmlsec, type=str, help=_t("Xmlsec1 binary path. This program should be executable by the user running Hue.")) ENTITY_ID = Config( key="entity_id", default="<base_url>/saml2/metadata/", type=str, help=_t("Entity ID for Hue acting as service provider. Can also accept a pattern where '<base_url>' will be replaced with server URL base.")) CREATE_USERS_ON_LOGIN = Config( key="create_users_on_login", default=True, type=coerce_bool, help=_t("Create users from IdP on login.")) ATTRIBUTE_MAP_DIR = Config( key="attribute_map_dir", default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'attribute-maps') ), type=str, private=True, help=_t("Attribute map directory contains files that map SAML attributes to pysaml2 attributes.")) ALLOW_UNSOLICITED = Config( key="allow_unsolicited", default=True, type=coerce_bool, private=True, help=_t("Allow responses that are initiated by the IdP.")) REQUIRED_ATTRIBUTES = Config( key="required_attributes", default=['uid'], type=coerce_csv, help=_t("Required attributes to ask for from IdP.")) OPTIONAL_ATTRIBUTES = Config( key="optional_attributes", default=[], type=coerce_csv, help=_t("Optional attributes to ask for from IdP.")) METADATA_FILE = Config( key="metadata_file", default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'examples', 'idp.xml') ), type=str, help=_t("IdP metadata in the form of a file. This is generally an XML file containing metadata that the Identity Provider generates.")) KEY_FILE = Config( key="key_file", default="", type=str, help=_t("key_file is the name of a PEM formatted file that contains the private key of the Hue service. This is presently used both to encrypt/sign assertions and as client key in a HTTPS session.")) CERT_FILE = Config( key="cert_file", default="", type=str, help=_t("This is the public part of the service private/public key pair. 
cert_file must be a PEM formatted certificate chain file.")) USER_ATTRIBUTE_MAPPING = Config( key="user_attribute_mapping", default={'uid': ('username', )}, type=dict_list_map, help=_t("A mapping from attributes in the response from the IdP to django user attributes.")) AUTHN_REQUESTS_SIGNED = Config( key="authn_requests_signed", default=False, type=coerce_bool, help=_t("Have Hue initiated authn requests be signed and provide a certificate.")) LOGOUT_REQUESTS_SIGNED = Config( key="logout_requests_signed", default=False, type=coerce_bool, help=_t("Have Hue initiated logout requests be signed and provide a certificate.")) USERNAME_SOURCE = Config( key="username_source", default="attributes", type=str, help=_t("Username can be sourced from 'attributes' or 'nameid'")) LOGOUT_ENABLED = Config( key="logout_enabled", default=True, type=coerce_bool, help=_t("Performs the logout or not.")) NAME_ID_FORMAT = Config( key="name_id_format", default="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent", type=str, help=_t("Request this NameID format from the server")) def config_validator(user): res = [] if USERNAME_SOURCE.get() not in USERNAME_SOURCES: res.append(("libsaml.username_source", _("username_source not configured properly. SAML integration may not work."))) return res
apache-2.0
-4,415,791,575,276,727,000
29.936306
201
0.711756
false
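A small sketch of how the `Config` options defined in the record above are read at runtime; `Config.get()` is the accessor the module itself uses in `config_validator`. This assumes a Hue environment where `desktop.lib.conf` and the `libsaml` package are importable, which is not guaranteed outside Hue.

# Sketch: reading the SAML options defined above (requires a Hue environment).
from libsaml import conf

print(conf.XMLSEC_BINARY.get())        # xmlsec1 path, resolved via dynamic_default
print(conf.METADATA_FILE.get())        # IdP metadata file
print(conf.USERNAME_SOURCE.get())      # 'attributes' or 'nameid'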
openqt/algorithms
leetcode/python/lc890-find-and-replace-pattern.py
1
1458
# coding=utf-8 import unittest """890. Find and Replace Pattern https://leetcode.com/problems/find-and-replace-pattern/description/ You have a list of `words` and a `pattern`, and you want to know which words in `words` matches the pattern. A word matches the pattern if there exists a permutation of letters `p` so that after replacing every letter `x` in the pattern with `p(x)`, we get the desired word. ( _Recall that a permutation of letters is a bijection from letters to letters: every letter maps to another letter, and no two letters map to the same letter._ ) Return a list of the words in `words` that match the given pattern. You may return the answer in any order. **Example 1:** **Input:** words = ["abc","deq","mee","aqq","dkd","ccc"], pattern = "abb" **Output:** ["mee","aqq"] **Explanation:** "mee" matches the pattern because there is a permutation {a -> m, b -> e, ...}. "ccc" does not match the pattern because {a -> c, b -> c, ...} is not a permutation, since a and b map to the same letter. **Note:** * `1 <= words.length <= 50` * `1 <= pattern.length = words[i].length <= 20` Similar Questions: """ class Solution(object): def findAndReplacePattern(self, words, pattern): """ :type words: List[str] :type pattern: str :rtype: List[str] """ def test(self): pass if __name__ == "__main__": unittest.main()
gpl-3.0
6,195,882,860,703,908,000
22.819672
101
0.630316
false
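The `findAndReplacePattern` method in the file above is left as a stub; below is a minimal sketch of one standard approach (canonical encoding of each word), written as a standalone function rather than as the repository author's solution.

# Sketch solution for LeetCode 890 (not the repository's implementation):
# a word matches the pattern iff both map to the same canonical form, where
# each letter is replaced by the index of its first occurrence.
def find_and_replace_pattern(words, pattern):
    def normalize(word):
        first_seen = {}
        return [first_seen.setdefault(ch, len(first_seen)) for ch in word]

    target = normalize(pattern)
    return [word for word in words if normalize(word) == target]


assert find_and_replace_pattern(
    ["abc", "deq", "mee", "aqq", "dkd", "ccc"], "abb") == ["mee", "aqq"]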
jamessergeant/pylearn2
pylearn2/expr/preprocessing.py
49
3088
""" Low-level utilities for preprocessing. Should be functions that apply to NumPy arrays, not preprocessor classes (though preprocessor classes should reuse these). """ __author__ = "David Warde-Farley" __copyright__ = "Copyright 2012, Universite de Montreal" __credits__ = ["David Warde-Farley"] __license__ = "3-clause BSD" __email__ = "wardefar@iro" __maintainer__ = "David Warde-Farley" import numpy def global_contrast_normalize(X, scale=1., subtract_mean=True, use_std=False, sqrt_bias=0., min_divisor=1e-8): """ Global contrast normalizes by (optionally) subtracting the mean across features and then normalizes by either the vector norm or the standard deviation (across features, for each example). Parameters ---------- X : ndarray, 2-dimensional Design matrix with examples indexed on the first axis and \ features indexed on the second. scale : float, optional Multiply features by this const. subtract_mean : bool, optional Remove the mean across features/pixels before normalizing. \ Defaults to `True`. use_std : bool, optional Normalize by the per-example standard deviation across features \ instead of the vector norm. Defaults to `False`. sqrt_bias : float, optional Fudge factor added inside the square root. Defaults to 0. min_divisor : float, optional If the divisor for an example is less than this value, \ do not apply it. Defaults to `1e-8`. Returns ------- Xp : ndarray, 2-dimensional The contrast-normalized features. Notes ----- `sqrt_bias` = 10 and `use_std = True` (and defaults for all other parameters) corresponds to the preprocessing used in [1]. References ---------- .. [1] A. Coates, H. Lee and A. Ng. "An Analysis of Single-Layer Networks in Unsupervised Feature Learning". AISTATS 14, 2011. http://www.stanford.edu/~acoates/papers/coatesleeng_aistats_2011.pdf """ assert X.ndim == 2, "X.ndim must be 2" scale = float(scale) assert scale >= min_divisor # Note: this is per-example mean across pixels, not the # per-pixel mean across examples. So it is perfectly fine # to subtract this without worrying about whether the current # object is the train, valid, or test set. mean = X.mean(axis=1) if subtract_mean: X = X - mean[:, numpy.newaxis] # Makes a copy. else: X = X.copy() if use_std: # ddof=1 simulates MATLAB's var() behaviour, which is what Adam # Coates' code does. ddof = 1 # If we don't do this, X.var will return nan. if X.shape[1] == 1: ddof = 0 normalizers = numpy.sqrt(sqrt_bias + X.var(axis=1, ddof=ddof)) / scale else: normalizers = numpy.sqrt(sqrt_bias + (X ** 2).sum(axis=1)) / scale # Don't normalize by anything too small. normalizers[normalizers < min_divisor] = 1. X /= normalizers[:, numpy.newaxis] # Does not make a copy. return X
bsd-3-clause
-3,666,265,793,086,349,300
31.851064
78
0.637306
false
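A usage sketch for `global_contrast_normalize` from the record above (the example data is made up, not from pylearn2); the `sqrt_bias=10., use_std=True` setting mirrors the Coates et al. preprocessing called out in the function's Notes.

# Usage sketch for global_contrast_normalize; the data here is made up.
import numpy
from pylearn2.expr.preprocessing import global_contrast_normalize

rng = numpy.random.RandomState(0)
X = rng.rand(5, 32 * 32 * 3)     # 5 examples, each a flattened 32x32 RGB image

# Settings from the docstring's Notes (Coates, Lee & Ng, AISTATS 2011).
Xp = global_contrast_normalize(X, sqrt_bias=10., use_std=True)

print(Xp.shape)                  # (5, 3072); per-example mean removed, std-normalized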
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_load_balancer_network_interfaces_operations.py
1
5637
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class LoadBalancerNetworkInterfacesOperations: """LoadBalancerNetworkInterfacesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2018_04_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name: str, load_balancer_name: str, **kwargs ) -> AsyncIterable["_models.NetworkInterfaceListResult"]: """Gets associated load balancer network interfaces. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param load_balancer_name: The name of the load balancer. 
:type load_balancer_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.NetworkInterfaceListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-04-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore
mit
7,452,728,853,334,514,000
47.594828
192
0.649991
false
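A hedged sketch of calling the `list` operation from the record above through the higher-level async client. It assumes the aio `NetworkManagementClient` exposes this operation group as `load_balancer_network_interfaces` and that `azure-identity` is installed; neither is defined in the generated file itself, and the names/ids are placeholders.

# Hypothetical async usage of the operation group defined above.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient


async def main():
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            async for nic in client.load_balancer_network_interfaces.list(
                    "<resource-group>", "<load-balancer-name>"):
                print(nic.name)


asyncio.run(main())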
HERA-Team/hera_mc
alembic/versions/93ff199763ac_update_lib_raid_errors_table.py
2
1114
"""update lib_raid_errors table Revision ID: 93ff199763ac Revises: b1063869f198 Create Date: 2017-07-27 00:13:29.765073+00:00 """ from alembic import op import sqlalchemy as sa from sqlalchemy.schema import Sequence, CreateSequence # revision identifiers, used by Alembic. revision = '93ff199763ac' down_revision = 'b1063869f198' branch_labels = None depends_on = None def upgrade(): op.drop_constraint('lib_raid_errors_pkey', 'lib_raid_errors', type_='primary') op.execute(CreateSequence(Sequence("lib_raid_errors_id_seq"))) op.add_column('lib_raid_errors', sa.Column('id', sa.BigInteger(), nullable=False, server_default=sa.text("nextval('lib_raid_errors_id_seq'::regclass)"))) op.create_primary_key("lib_raid_errors_pkey", "lib_raid_errors", ["id", ]) # ### end Alembic commands ### def downgrade(): op.drop_constraint('lib_raid_errors_pkey', 'lib_raid_errors', type_='primary') op.drop_column('lib_raid_errors', 'id') op.create_primary_key("lib_raid_errors_pkey", "lib_raid_errors", ["time", "hostname", "disk"]) # ### end Alembic commands ###
bsd-2-clause
6,957,662,874,944,768,000
29.108108
98
0.685817
false
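A short sketch of applying the migration from the record above programmatically with Alembic's command API; it assumes an `alembic.ini` whose `script_location` points at this repository's migrations directory, which is not part of the file itself. The target revision string comes from the migration's own `revision` identifier.

# Sketch: apply the lib_raid_errors migration above via the Alembic command API.
from alembic import command
from alembic.config import Config

alembic_cfg = Config("alembic.ini")           # assumed to point at this repo's migrations
command.upgrade(alembic_cfg, "93ff199763ac")  # the revision id declared in the file
# command.downgrade(alembic_cfg, "b1063869f198")  # revert to the previous revision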
alexhenrie/poedit
deps/boost/tools/build/test/implicit_dependency.py
7
1347
#!/usr/bin/python # Copyright (C) Vladimir Prus 2006. # Distributed under the Boost Software License, Version 1.0. (See # accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # Test the <implicit-dependency> is respected even if the target referred to is # not built itself, but only referred to by <implicit-dependency>. import BoostBuild t = BoostBuild.Tester(use_test_config=False) t.write("jamroot.jam", """ make a.h : : gen-header ; explicit a.h ; exe hello : hello.cpp : <implicit-dependency>a.h ; import os ; if [ os.name ] = NT { actions gen-header { echo int i; > $(<) } } else { actions gen-header { echo "int i;" > $(<) } } """) t.write("hello.cpp", """ #include "a.h" int main() { return i; } """) t.run_build_system() t.expect_addition("bin/$toolset/debug*/hello.exe") t.rm("bin") t.write("jamroot.jam", """ make dir/a.h : : gen-header ; explicit dir/a.h ; exe hello : hello.cpp : <implicit-dependency>dir/a.h ; import os ; if [ os.name ] = NT { actions gen-header { echo int i; > $(<) } } else { actions gen-header { echo "int i;" > $(<) } } """) t.write("hello.cpp", """ #include "dir/a.h" int main() { return i; } """) t.run_build_system() t.expect_addition("bin/$toolset/debug*/hello.exe") t.cleanup()
mit
-6,630,603,134,510,737,000
15.62963
79
0.606533
false
gmarkall/numba
numba/cuda/tests/cudadrv/test_events.py
6
1075
import numpy as np from numba import cuda from numba.cuda.testing import unittest, CUDATestCase class TestCudaEvent(CUDATestCase): def test_event_elapsed(self): N = 32 dary = cuda.device_array(N, dtype=np.double) evtstart = cuda.event() evtend = cuda.event() evtstart.record() cuda.to_device(np.arange(N, dtype=np.double), to=dary) evtend.record() evtend.wait() evtend.synchronize() # Exercise the code path evtstart.elapsed_time(evtend) def test_event_elapsed_stream(self): N = 32 stream = cuda.stream() dary = cuda.device_array(N, dtype=np.double) evtstart = cuda.event() evtend = cuda.event() evtstart.record(stream=stream) cuda.to_device(np.arange(N, dtype=np.double), to=dary, stream=stream) evtend.record(stream=stream) evtend.wait(stream=stream) evtend.synchronize() # Exercise the code path evtstart.elapsed_time(evtend) if __name__ == '__main__': unittest.main()
bsd-2-clause
-197,633,809,228,476,770
27.289474
77
0.612093
false
robotican/ric
ric_board/scripts/RiCConfigurator/GUI/Schemes/gazeboGui.py
1
5385
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'gazeboGui.ui' # # Created: Wed Aug 26 11:06:18 2015 # by: PyQt4 UI code generator 4.10.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_gazebo_gui(object): def setupUi(self, gazebo_gui): gazebo_gui.setObjectName(_fromUtf8("gazebo_gui")) gazebo_gui.resize(276, 393) gazebo_gui.setMinimumSize(QtCore.QSize(276, 393)) gazebo_gui.setMaximumSize(QtCore.QSize(276, 393)) gazebo_gui.setStyleSheet(_fromUtf8("#gazebo_gui { \n" " background-color: \n" " qlineargradient(spread:pad, x1:1, y1:0.682, x2:0.966825, y2:0, stop:0 \n" " rgba(224, 224, 224, 255), stop:1 rgba(171, 171, 171, 255));\n" "}")) self.verticalLayoutWidget_2 = QtGui.QWidget(gazebo_gui) self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 251, 331)) self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2")) self.verticalLayout_2 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2) self.verticalLayout_2.setSpacing(15) self.verticalLayout_2.setMargin(0) self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2")) self.formLayout = QtGui.QFormLayout() self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow) self.formLayout.setObjectName(_fromUtf8("formLayout")) self.nameSpaceLabel = QtGui.QLabel(self.verticalLayoutWidget_2) self.nameSpaceLabel.setObjectName(_fromUtf8("nameSpaceLabel")) self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.nameSpaceLabel) self.nameSpaceLineEdit = QtGui.QLineEdit(self.verticalLayoutWidget_2) self.nameSpaceLineEdit.setObjectName(_fromUtf8("nameSpaceLineEdit")) self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.nameSpaceLineEdit) self.numberOfRobotsLabel = QtGui.QLabel(self.verticalLayoutWidget_2) self.numberOfRobotsLabel.setObjectName(_fromUtf8("numberOfRobotsLabel")) self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.numberOfRobotsLabel) self.numberOfRobotsSpinBox = QtGui.QSpinBox(self.verticalLayoutWidget_2) self.numberOfRobotsSpinBox.setMinimum(1) self.numberOfRobotsSpinBox.setMaximum(10) self.numberOfRobotsSpinBox.setObjectName(_fromUtf8("numberOfRobotsSpinBox")) self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.numberOfRobotsSpinBox) self.verticalLayout_2.addLayout(self.formLayout) self.robotComboBox = QtGui.QComboBox(self.verticalLayoutWidget_2) self.robotComboBox.setObjectName(_fromUtf8("robotComboBox")) self.robotComboBox.addItem(_fromUtf8("")) self.robotComboBox.addItem(_fromUtf8("")) self.robotComboBox.addItem(_fromUtf8("")) self.verticalLayout_2.addWidget(self.robotComboBox) self.label = QtGui.QLabel(self.verticalLayoutWidget_2) self.label.setAlignment(QtCore.Qt.AlignCenter) self.label.setObjectName(_fromUtf8("label")) self.verticalLayout_2.addWidget(self.label) self.devList = QtGui.QListWidget(self.verticalLayoutWidget_2) self.devList.setFrameShape(QtGui.QFrame.StyledPanel) self.devList.setFrameShadow(QtGui.QFrame.Sunken) self.devList.setObjectName(_fromUtf8("devList")) self.verticalLayout_2.addWidget(self.devList) self.launchButton = QtGui.QPushButton(gazebo_gui) 
self.launchButton.setGeometry(QtCore.QRect(140, 350, 121, 27)) self.launchButton.setObjectName(_fromUtf8("launchButton")) self.loadButton = QtGui.QPushButton(gazebo_gui) self.loadButton.setGeometry(QtCore.QRect(10, 350, 111, 27)) self.loadButton.setObjectName(_fromUtf8("loadButton")) self.retranslateUi(gazebo_gui) QtCore.QMetaObject.connectSlotsByName(gazebo_gui) def retranslateUi(self, gazebo_gui): gazebo_gui.setWindowTitle(_translate("gazebo_gui", "gazebo build", None)) self.nameSpaceLabel.setText(_translate("gazebo_gui", "Name Space:", None)) self.numberOfRobotsLabel.setText(_translate("gazebo_gui", "Number of robots:", None)) self.robotComboBox.setToolTip(_translate("gazebo_gui", "<html><head/><body><p>Select robot </p></body></html>", None)) self.robotComboBox.setItemText(0, _translate("gazebo_gui", "Select robot", None)) self.robotComboBox.setItemText(1, _translate("gazebo_gui", "Komodo", None)) self.robotComboBox.setItemText(2, _translate("gazebo_gui", "Lizi", None)) self.label.setText(_translate("gazebo_gui", "<html><head/><body><p><span style=\" font-size:14pt;\">Robot details</span></p></body></html>", None)) self.launchButton.setText(_translate("gazebo_gui", "launch gazebo", None)) self.loadButton.setText(_translate("gazebo_gui", "Load new file", None))
bsd-3-clause
-1,613,703,236,466,104,000
53.94898
155
0.708449
false
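The class in the record above is pyuic4-generated and does not run on its own; below is a minimal, hypothetical launcher. The import path is taken from the record's file path, and using a plain `QWidget` as the container is an assumption.

# Hypothetical launcher for the generated Ui_gazebo_gui class above.
import sys

from PyQt4 import QtGui

from GUI.Schemes.gazeboGui import Ui_gazebo_gui  # path assumed from the record above


def main():
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QWidget()
    ui = Ui_gazebo_gui()
    ui.setupUi(window)      # builds all child widgets onto `window`
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()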
cloudtools/awacs
awacs/worklink.py
1
2663
# Copyright (c) 2012-2021, Mark Peek <[email protected]> # All rights reserved. # # See LICENSE file for full license. from .aws import Action as BaseAction from .aws import BaseARN service_name = "Amazon WorkLink" prefix = "worklink" class Action(BaseAction): def __init__(self, action: str = None) -> None: super().__init__(prefix, action) class ARN(BaseARN): def __init__(self, resource: str = "", region: str = "", account: str = "") -> None: super().__init__( service=prefix, resource=resource, region=region, account=account ) AssociateDomain = Action("AssociateDomain") AssociateWebsiteAuthorizationProvider = Action("AssociateWebsiteAuthorizationProvider") AssociateWebsiteCertificateAuthority = Action("AssociateWebsiteCertificateAuthority") CreateFleet = Action("CreateFleet") DeleteFleet = Action("DeleteFleet") DescribeAuditStreamConfiguration = Action("DescribeAuditStreamConfiguration") DescribeCompanyNetworkConfiguration = Action("DescribeCompanyNetworkConfiguration") DescribeDevice = Action("DescribeDevice") DescribeDevicePolicyConfiguration = Action("DescribeDevicePolicyConfiguration") DescribeDomain = Action("DescribeDomain") DescribeFleetMetadata = Action("DescribeFleetMetadata") DescribeIdentityProviderConfiguration = Action("DescribeIdentityProviderConfiguration") DescribeWebsiteCertificateAuthority = Action("DescribeWebsiteCertificateAuthority") DisassociateDomain = Action("DisassociateDomain") DisassociateWebsiteAuthorizationProvider = Action( "DisassociateWebsiteAuthorizationProvider" ) DisassociateWebsiteCertificateAuthority = Action( "DisassociateWebsiteCertificateAuthority" ) ListDevices = Action("ListDevices") ListDomains = Action("ListDomains") ListFleets = Action("ListFleets") ListTagsForResource = Action("ListTagsForResource") ListWebsiteAuthorizationProviders = Action("ListWebsiteAuthorizationProviders") ListWebsiteCertificateAuthorities = Action("ListWebsiteCertificateAuthorities") RestoreDomainAccess = Action("RestoreDomainAccess") RevokeDomainAccess = Action("RevokeDomainAccess") SearchEntity = Action("SearchEntity") SignOutUser = Action("SignOutUser") TagResource = Action("TagResource") UntagResource = Action("UntagResource") UpdateAuditStreamConfiguration = Action("UpdateAuditStreamConfiguration") UpdateCompanyNetworkConfiguration = Action("UpdateCompanyNetworkConfiguration") UpdateDevicePolicyConfiguration = Action("UpdateDevicePolicyConfiguration") UpdateDomainMetadata = Action("UpdateDomainMetadata") UpdateFleetMetadata = Action("UpdateFleetMetadata") UpdateIdentityProviderConfiguration = Action("UpdateIdentityProviderConfiguration")
bsd-2-clause
8,752,525,424,813,745,000
41.951613
88
0.812617
false
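A sketch of how the generated WorkLink `Action` constants in the record above are typically combined into a policy document. The `PolicyDocument`/`Statement`/`Allow` helpers are assumed to come from `awacs.aws`, as in common awacs usage; this module itself only defines the actions and the ARN class.

# Sketch: building an IAM policy from the WorkLink actions defined above.
# Assumes awacs.aws provides Allow, PolicyDocument and Statement.
from awacs.aws import Allow, PolicyDocument, Statement
from awacs import worklink

policy = PolicyDocument(
    Version="2012-10-17",
    Statement=[
        Statement(
            Effect=Allow,
            Action=[worklink.ListFleets, worklink.DescribeFleetMetadata],
            Resource=["*"],
        )
    ],
)

print(policy.to_json())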
ktosiek/spacewalk
spacecmd/src/lib/user.py
2
17881
# # Licensed under the GNU General Public License Version 3 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright 2013 Aron Parsons <[email protected]> # Copyright (c) 2013 Red Hat, Inc. # # NOTE: the 'self' variable is an instance of SpacewalkShell import shlex from getpass import getpass from optparse import Option from spacecmd.utils import * def help_user_create(self): print 'user_create: Create an user' print '''usage: user_create [options] options: -u USERNAME -f FIRST_NAME -l LAST_NAME -e EMAIL -p PASSWORD --pam enable PAM authentication''' def do_user_create(self, args): options = [ Option('-u', '--username', action='store'), Option('-f', '--first-name', action='store'), Option('-l', '--last-name', action='store'), Option('-e', '--email', action='store'), Option('-p', '--password', action='store'), Option('', '--pam', action='store_true') ] (args, options) = parse_arguments(args, options) if is_interactive(options): options.username = prompt_user('Username:', noblank = True) options.first_name = prompt_user('First Name:', noblank = True) options.last_name = prompt_user('Last Name:', noblank = True) options.email = prompt_user('Email:', noblank = True) options.pam = self.user_confirm('PAM Authentication [y/N]:', nospacer = True, integer = True, ignore_yes = True) options.password = '' while options.password == '': password1 = getpass('Password: ') password2 = getpass('Repeat Password: ') if password1 == password2: options.password = password1 elif password1 == '': logging.warning('Password must be at least 5 characters') else: logging.warning("Passwords don't match") else: if not options.username: logging.error('A username is required') return if not options.first_name: logging.error('A first name is required') return if not options.last_name: logging.error('A last name is required') return if not options.email: logging.error('An email address is required') return if not options.password and not options.pam: logging.error('A password is required') return if options.pam: options.pam = 1 # API requires a non-None password even though it's not used # when PAM is enabled if options.password: logging.warning("Note password field is ignored for PAM mode") options.password="" else: options.pam = 0 self.client.user.create(self.session, options.username, options.password, options.first_name, options.last_name, options.email, options.pam) #################### def help_user_delete(self): print 'user_delete: Delete an user' print 'usage: user_delete NAME' def complete_user_delete(self, text, line, beg, end): return tab_completer(self.do_user_list('', True), text) def do_user_delete(self, args): (args, options) = parse_arguments(args) if len(args) != 1: self.help_user_delete() return name = args[0] if self.user_confirm('Delete this user [y/N]:'): self.client.user.delete(self.session, name) #################### def help_user_disable(self): 
print 'user_disable: Disable an user account' print 'usage: user_disable NAME' def complete_user_disable(self, text, line, beg, end): return tab_completer(self.do_user_list('', True), text) def do_user_disable(self, args): (args, options) = parse_arguments(args) if len(args) != 1: self.help_user_disable() return name = args[0] self.client.user.disable(self.session, name) #################### def help_user_enable(self): print 'user_enable: Enable an user account' print 'usage: user_enable NAME' def complete_user_enable(self, text, line, beg, end): return tab_completer(self.do_user_list('', True), text) def do_user_enable(self, args): (args, options) = parse_arguments(args) if len(args) != 1: self.help_user_enable() return name = args[0] self.client.user.enable(self.session, name) #################### def help_user_list(self): print 'user_list: List all users' print 'usage: user_list' def do_user_list(self, args, doreturn = False): users = self.client.user.listUsers(self.session) users = [u.get('login') for u in users] if doreturn: return users else: if len(users): print '\n'.join(sorted(users)) #################### def help_user_listavailableroles(self): print 'user_list: List all available roles for users' print 'usage: user_listavailableroles' def do_user_listavailableroles(self, args, doreturn = False): roles = self.client.user.listAssignableRoles(self.session) if doreturn: return roles else: if len(roles): print '\n'.join(sorted(roles)) #################### def help_user_addrole(self): print 'user_addrole: Add a role to an user account' print 'usage: user_addrole USER ROLE' def complete_user_addrole(self, text, line, beg, end): parts = line.split(' ') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) == 3: return tab_completer(self.do_user_listavailableroles('', True), text) def do_user_addrole(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_addrole() return user = args[0] role = args[1] self.client.user.addRole(self.session, user, role) #################### def help_user_removerole(self): print 'user_removerole: Remove a role from an user account' print 'usage: user_removerole USER ROLE' def complete_user_removerole(self, text, line, beg, end): parts = line.split(' ') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) == 3: # only list the roles currently assigned to this user roles = self.client.user.listRoles(self.session, parts[1]) return tab_completer(roles, text) def do_user_removerole(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_removerole() return user = args[0] role = args[1] self.client.user.removeRole(self.session, user, role) #################### def help_user_details(self): print 'user_details: Show the details of an user' print 'usage: user_details USER ...' 
def complete_user_details(self, text, line, beg, end): return tab_completer(self.do_user_list('', True), text) def do_user_details(self, args): (args, options) = parse_arguments(args) if not len(args): self.help_user_details() return add_separator = False for user in args: try: details = self.client.user.getDetails(self.session, user) roles = self.client.user.listRoles(self.session, user) groups = \ self.client.user.listAssignedSystemGroups(self.session, user) default_groups = \ self.client.user.listDefaultSystemGroups(self.session, user) except: logging.warning('%s is not a valid user' % user) continue org_details = self.client.org.getDetails(self.session, details.get('org_id')) organization = org_details.get('name') if add_separator: print self.SEPARATOR add_separator = True print 'Username: %s' % user print 'First Name: %s' % details.get('first_name') print 'Last Name: %s' % details.get('last_name') print 'Email Address: %s' % details.get('email') print 'Organization: %s' % organization print 'Last Login: %s' % details.get('last_login_date') print 'Created: %s' % details.get('created_date') print 'Enabled: %s' % details.get('enabled') if len(roles): print print 'Roles' print '-----' print '\n'.join(sorted(roles)) if len(groups): print print 'Assigned Groups' print '---------------' print '\n'.join(sorted([g.get('name') for g in groups])) if len(default_groups): print print 'Default Groups' print '--------------' print '\n'.join(sorted([g.get('name') for g in default_groups])) #################### def help_user_addgroup(self): print 'user_addgroup: Add a group to an user account' print 'usage: user_addgroup USER <GROUP ...>' def complete_user_addgroup(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append('') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: return tab_completer(self.do_group_list('', True), parts[-1]) def do_user_addgroup(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_addgroup() return user = args.pop(0) groups = args self.client.user.addAssignedSystemGroups(self.session, user, groups, False) #################### def help_user_adddefaultgroup(self): print 'user_adddefaultgroup: Add a default group to an user account' print 'usage: user_adddefaultgroup USER <GROUP ...>' def complete_user_adddefaultgroup(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append('') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: return tab_completer(self.do_group_list('', True), parts[-1]) def do_user_adddefaultgroup(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_adddefaultgroup() return user = args.pop(0) groups = args self.client.user.addDefaultSystemGroups(self.session, user, groups) #################### def help_user_removegroup(self): print 'user_removegroup: Remove a group to an user account' print 'usage: user_removegroup USER <GROUP ...>' def complete_user_removegroup(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append('') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: # only list the groups currently assigned to this user groups = self.client.user.listAssignedSystemGroups(self.session, parts[1]) return tab_completer([ g.get('name') for g in groups ], parts[-1]) def do_user_removegroup(self, args): (args, options) = parse_arguments(args) if len(args) != 2: 
self.help_user_removegroup() return user = args.pop(0) groups = args self.client.user.removeAssignedSystemGroups(self.session, user, groups, True) #################### def help_user_removedefaultgroup(self): print 'user_removedefaultgroup: Remove a default group from an ' + \ 'user account' print 'usage: user_removedefaultgroup USER <GROUP ...>' def complete_user_removedefaultgroup(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append('') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: # only list the groups currently assigned to this user groups = self.client.user.listDefaultSystemGroups(self.session, parts[1]) return tab_completer([ g.get('name') for g in groups ], parts[-1]) def do_user_removedefaultgroup(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_removedefaultgroup() return user = args.pop(0) groups = args self.client.user.removeDefaultSystemGroups(self.session, user, groups) #################### def help_user_setfirstname(self): print 'user_setfirstname: Set an user accounts first name field' print 'usage: user_setfirstname USER FIRST_NAME' def complete_user_setfirstname(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append(' ') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: return def do_user_setfirstname(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_setfirstname() return user = args.pop(0) details = { 'first_name' : args.pop(0) } self.client.user.setDetails(self.session, user, details) #################### def help_user_setlastname(self): print 'user_setlastname: Set an user accounts last name field' print 'usage: user_setlastname USER LAST_NAME' def complete_user_setlastname(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append(' ') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: return def do_user_setlastname(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_setlastname() return user = args.pop(0) details = { 'last_name' : args.pop(0) } self.client.user.setDetails(self.session, user, details) #################### def help_user_setemail(self): print 'user_setemail: Set an user accounts email field' print 'usage: user_setemail USER EMAIL' def complete_user_setemail(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append(' ') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: return def do_user_setemail(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_setemail() return user = args.pop(0) details = { 'email' : args.pop(0) } self.client.user.setDetails(self.session, user, details) #################### def help_user_setprefix(self): print 'user_setprefix: Set an user accounts name prefix field' print 'usage: user_setprefix USER PREFIX' def complete_user_setprefix(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append(' ') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: return def do_user_setprefix(self, args): (args, options) = parse_arguments(args) if len(args) > 2: self.help_user_setprefix() return user = args.pop(0) if len(args) == 0: # clearing prefix with a space currently does not work # spacewalk requires a space to 
clear the prefix but the # space seems to be stripped when submitted to the API gateway # attempts to use %x20 and \u0020 (among others) also fail details = { 'prefix' : ' ' } else: details = { 'prefix' : args.pop(0) } self.client.user.setDetails(self.session, user, details) #################### def help_user_setpassword(self): print 'user_setpassword: Set an user accounts name prefix field' print 'usage: user_setpassword USER PASSWORD' def complete_user_setpassword(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append(' ') if len(parts) == 2: return tab_completer(self.do_user_list('', True), text) elif len(parts) > 2: return def do_user_setpassword(self, args): (args, options) = parse_arguments(args) if len(args) != 2: self.help_user_setpassword() return user = args.pop(0) details = { 'password' : args.pop(0) } self.client.user.setDetails(self.session, user, details) # vim:ts=4:expandtab:
gpl-2.0
-3,437,310,527,126,000,000
29.052101
79
0.573458
false
nikitos/npui
netprofile/netprofile/export/pdf.py
2
7572
#!/usr/bin/env python # -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*- # # NetProfile: Data export support for PDF files # © Copyright 2015 Alex 'Unik' Unigovsky # # This file is part of NetProfile. # NetProfile is free software: you can redistribute it and/or # modify it under the terms of the GNU Affero General Public # License as published by the Free Software Foundation, either # version 3 of the License, or (at your option) any later # version. # # NetProfile is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General # Public License along with NetProfile. If not, see # <http://www.gnu.org/licenses/>. from __future__ import ( unicode_literals, print_function, absolute_import, division ) import datetime import io import urllib from netprofile import PY3 from netprofile.ext.columns import PseudoColumn from netprofile.export import ExportFormat from netprofile.pdf import ( DefaultDocTemplate, PAGE_ORIENTATIONS, PAGE_SIZES, TABLE_STYLE_DEFAULT ) from pyramid.i18n import ( TranslationStringFactory, get_localizer ) from pyramid.response import Response from reportlab.platypus import ( Paragraph, LongTable ) from reportlab.lib.units import ( cm, inch ) from babel.dates import format_datetime _ = TranslationStringFactory('netprofile') class PDFExportFormat(ExportFormat): """ Export data as PDF files. """ @property def name(self): return _('PDF') @property def icon(self): return 'ico-pdf' def enabled(self, req): if req.pdf_styles is None: return False return True def options(self, req, name): loc = get_localizer(req) return ({ 'name' : 'pdf_pagesz', 'fieldLabel' : loc.translate(_('Page size')), 'xtype' : 'combobox', 'displayField' : 'value', 'valueField' : 'id', 'format' : 'string', 'queryMode' : 'local', 'grow' : True, 'shrinkWrap' : True, 'value' : 'a4', 'allowBlank' : False, 'forceSelection' : True, 'editable' : False, 'store' : { 'xtype' : 'simplestore', 'sorters' : [{ 'property' : 'value', 'direction' : 'ASC' }], 'fields' : ('id', 'value'), 'data' : tuple({ 'id' : k, 'value' : v[0] } for k, v in PAGE_SIZES.items()) } }, { 'name' : 'pdf_orient', 'fieldLabel' : loc.translate(_('Orientation')), 'xtype' : 'combobox', 'displayField' : 'value', 'valueField' : 'id', 'format' : 'string', 'queryMode' : 'local', 'grow' : True, 'shrinkWrap' : True, 'value' : 'portrait', 'allowBlank' : False, 'forceSelection' : True, 'editable' : False, 'store' : { 'xtype' : 'simplestore', 'sorters' : [{ 'property' : 'value', 'direction' : 'ASC' }], 'fields' : ('id', 'value'), 'data' : tuple({ 'id' : k, 'value' : loc.translate(v[0]) } for k, v in PAGE_ORIENTATIONS.items()) } }, { 'name' : 'pdf_hmargins', 'fieldLabel' : loc.translate(_('Horizontal Margins')), 'xtype' : 'numberfield', 'allowBlank' : False, 'autoStripChars' : True, 'minValue' : 0.0, 'step' : 0.2, 'value' : 1.8 }, { 'name' : 'pdf_vmargins', 'fieldLabel' : loc.translate(_('Vertical Margins')), 'xtype' : 'numberfield', 'allowBlank' : False, 'autoStripChars' : True, 'minValue' : 0.0, 'step' : 0.2, 'value' : 2.0 }) def export(self, extm, params, req): pdf_pagesz = params.pop('pdf_pagesz', 'a4') pdf_orient = params.pop('pdf_orient', 'portrait') try: pdf_hmargins = float(params.pop('pdf_hmargins', 1.8)) except ValueError: pdf_hmargins = 1.8 try: pdf_vmargins = float(params.pop('pdf_vmargins', 2.0)) except ValueError: 
pdf_vmargins = 2.0 fields = [] flddef = [] col_widths = [] col_flexes = [] total_width = 0 total_flex = 0 for field in extm.export_view: if isinstance(field, PseudoColumn): fld = field field = fld.name else: fld = extm.get_column(field) fields.append(field) flddef.append(fld) width = fld.column_width flex = fld.column_flex if not width: width = fld.pixels if not width: width = 200 width = width / 200 * inch col_widths.append(width) if flex: col_flexes.append(flex) total_flex += flex else: col_flexes.append(None) total_width += width if pdf_pagesz not in PAGE_SIZES: raise ValueError('Unknown page size specified') if pdf_orient not in ('portrait', 'landscape'): raise ValueError('Unknown page orientation specified') res = Response() loc = get_localizer(req) now = datetime.datetime.now() res.last_modified = now res.content_type = 'application/pdf' res.cache_control.no_cache = True res.cache_control.no_store = True res.cache_control.private = True res.cache_control.must_revalidate = True res.headerlist.append(('X-Frame-Options', 'SAMEORIGIN')) if PY3: res.content_disposition = \ 'attachment; filename*=UTF-8\'\'%s-%s.pdf' % ( urllib.parse.quote(loc.translate(extm.menu_name), ''), now.date().isoformat() ) else: res.content_disposition = \ 'attachment; filename*=UTF-8\'\'%s-%s.pdf' % ( urllib.quote(loc.translate(extm.menu_name).encode(), ''), now.date().isoformat() ) for prop in ('__page', '__start', '__limit'): if prop in params: del params[prop] data = extm.read(params, req)['records'] doc = DefaultDocTemplate( res, request=req, pagesize=pdf_pagesz, orientation=pdf_orient, topMargin=pdf_vmargins * cm, leftMargin=pdf_hmargins * cm, rightMargin=pdf_hmargins * cm, bottomMargin=pdf_vmargins * cm, title=loc.translate(_('{0}, exported at {1}')).format( loc.translate(extm.menu_name), format_datetime(now, locale=req.current_locale) ) ) total_width = doc.width - total_width - 12 if total_flex > 0: width_per_flex = total_width / total_flex else: width_per_flex = 0.0 table_widths = [] for idx, field in enumerate(fields): if col_flexes[idx]: table_widths.append(col_flexes[idx] * width_per_flex) else: table_widths.append(col_widths[idx]) ss = req.pdf_styles if ss is None: raise RuntimeError('PDF subsystem is not configured. See application .INI files.') # TODO: add custom extmodel option to specify rowHeights, as an # optimization measure. Otherwise reportlab takes +Inf time on huge # tables. # Crude hack: rowHeights=([0.5 * inch] * (len(data) + 1) table = LongTable( tuple(storyteller(data, fields, flddef, localizer=loc, model=extm, styles=ss)), colWidths=table_widths, repeatRows=1 ) table.setStyle(TABLE_STYLE_DEFAULT) story = [table] doc.build(story) return res def storyteller(data, fields, flddef, localizer=None, model=None, styles=None, write_header=True): if model and localizer and write_header: yield tuple( Paragraph(localizer.translate(flddef[idx].header_string), styles['table_header']) for idx, field in enumerate(fields) ) for row in data: pdfrow = [] for field in fields: if (field not in row) or (row[field] is None): pdfrow.append('') continue pdfrow.append(Paragraph(str(row[field]), styles['body'])) yield pdfrow
agpl-3.0
8,866,479,920,615,688,000
26.732601
104
0.629639
false
vijayendrabvs/ssl-neutron
neutron/tests/unit/brocade/test_brocade_plugin.py
38
2433
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.plugins.brocade import NeutronPlugin as brocade_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit import test_db_plugin as test_plugin

PLUGIN_NAME = ('neutron.plugins.brocade.'
               'NeutronPlugin.BrocadePluginV2')
NOS_DRIVER = ('neutron.plugins.brocade.'
              'nos.fake_nosdriver.NOSdriver')
FAKE_IPADDRESS = '2.2.2.2'
FAKE_USERNAME = 'user'
FAKE_PASSWORD = 'password'
FAKE_PHYSICAL_INTERFACE = 'em1'


class BrocadePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):

    _plugin_name = PLUGIN_NAME

    def setUp(self):

        def mocked_brocade_init(self):
            self._switch = {'address': FAKE_IPADDRESS,
                            'username': FAKE_USERNAME,
                            'password': FAKE_PASSWORD
                            }
            self._driver = importutils.import_object(NOS_DRIVER)

        with mock.patch.object(brocade_plugin.BrocadePluginV2,
                               'brocade_init', new=mocked_brocade_init):
            super(BrocadePluginV2TestCase, self).setUp(self._plugin_name)


class TestBrocadeBasicGet(test_plugin.TestBasicGet,
                          BrocadePluginV2TestCase):
    pass


class TestBrocadeV2HTTPResponse(test_plugin.TestV2HTTPResponse,
                                BrocadePluginV2TestCase):
    pass


class TestBrocadePortsV2(test_plugin.TestPortsV2,
                         BrocadePluginV2TestCase,
                         test_bindings.PortBindingsTestCase):

    VIF_TYPE = portbindings.VIF_TYPE_BRIDGE
    HAS_PORT_FILTER = True


class TestBrocadeNetworksV2(test_plugin.TestNetworksV2,
                            BrocadePluginV2TestCase):
    pass
apache-2.0
5,784,719,034,305,688,000
31.878378
76
0.677764
false
Nindaleth/ansible-modules-core
cloud/openstack/os_user.py
2
6966
#!/usr/bin/python # Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- module: os_user short_description: Manage OpenStack Identity Users extends_documentation_fragment: openstack version_added: "2.0" description: - Manage OpenStack Identity users. Users can be created, updated or deleted using this module. A user will be updated if I(name) matches an existing user and I(state) is present. The value for I(name) cannot be updated without deleting and re-creating the user. options: name: description: - Username for the user required: true password: description: - Password for the user required: true when I(state) is present default: None email: description: - Email address for the user required: false default: None default_project: description: - Project name or ID that the user should be associated with by default required: false default: None domain: description: - Domain to create the user in if the cloud supports domains required: false default: None enabled: description: - Is the user enabled required: false default: True state: description: - Should the resource be present or absent. choices: [present, absent] default: present requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' # Create a user - os_user: cloud: mycloud state: present name: demouser password: secret email: [email protected] domain: default default_project: demo # Delete a user - os_user: cloud: mycloud state: absent name: demouser ''' RETURN = ''' user: description: Dictionary describing the user. returned: On success when I(state) is 'present' type: dictionary contains: default_project_id: description: User default project ID. Only present with Keystone >= v3. type: string sample: "4427115787be45f08f0ec22a03bfc735" domain_id: description: User domain ID. Only present with Keystone >= v3. type: string sample: "default" email: description: User email address type: string sample: "[email protected]" id: description: User ID type: string sample: "f59382db809c43139982ca4189404650" name: description: User name type: string sample: "demouser" ''' def _needs_update(module, user): keys = ('email', 'default_project', 'domain', 'enabled') for key in keys: if module.params[key] is not None and module.params[key] != user.get(key): return True # We don't get password back in the user object, so assume any supplied # password is a change. 
if module.params['password'] is not None: return True return False def main(): argument_spec = openstack_full_argument_spec( name=dict(required=True), password=dict(required=False, default=None), email=dict(required=False, default=None), default_project=dict(required=False, default=None), domain=dict(required=False, default=None), enabled=dict(default=True, type='bool'), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule( argument_spec, required_if=[ ('state', 'present', ['password']) ], **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') name = module.params['name'] password = module.params['password'] email = module.params['email'] default_project = module.params['default_project'] domain = module.params['domain'] enabled = module.params['enabled'] state = module.params['state'] try: cloud = shade.openstack_cloud(**module.params) user = cloud.get_user(name) project_id = None if default_project: project = cloud.get_project(default_project) if not project: module.fail_json(msg='Default project %s is not valid' % default_project) project_id = project['id'] if domain: opcloud = shade.operator_cloud(**module.params) try: # We assume admin is passing domain id dom = opcloud.get_domain(domain)['id'] domain = dom except: # If we fail, maybe admin is passing a domain name. # Note that domains have unique names, just like id. try: dom = opcloud.search_domains(filters={'name': domain})[0]['id'] domain = dom except: # Ok, let's hope the user is non-admin and passing a sane id pass if state == 'present': if user is None: user = cloud.create_user( name=name, password=password, email=email, default_project=default_project, domain_id=domain, enabled=enabled) changed = True else: if _needs_update(module, user): user = cloud.update_user( user['id'], password=password, email=email, default_project=project_id, domain_id=domain, enabled=enabled) changed = True else: changed = False module.exit_json(changed=changed, user=user) elif state == 'absent': if user is None: changed=False else: cloud.delete_user(user['id']) changed=True module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=str(e), extra_data=e.extra_data) from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
7,634,729,258,371,155,000
29.552632
89
0.597186
false
CapOM/ChromiumGStreamerBackend
tools/telemetry/third_party/webpagereplay/third_party/dns/rdtypes/sigbase.py
235
6349
# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import calendar import struct import time import dns.dnssec import dns.exception import dns.rdata import dns.rdatatype class BadSigTime(dns.exception.DNSException): """Raised when a SIG or RRSIG RR's time cannot be parsed.""" pass def sigtime_to_posixtime(what): if len(what) != 14: raise BadSigTime year = int(what[0:4]) month = int(what[4:6]) day = int(what[6:8]) hour = int(what[8:10]) minute = int(what[10:12]) second = int(what[12:14]) return calendar.timegm((year, month, day, hour, minute, second, 0, 0, 0)) def posixtime_to_sigtime(what): return time.strftime('%Y%m%d%H%M%S', time.gmtime(what)) class SIGBase(dns.rdata.Rdata): """SIG-like record base @ivar type_covered: the rdata type this signature covers @type type_covered: int @ivar algorithm: the algorithm used for the sig @type algorithm: int @ivar labels: number of labels @type labels: int @ivar original_ttl: the original TTL @type original_ttl: long @ivar expiration: signature expiration time @type expiration: long @ivar inception: signature inception time @type inception: long @ivar key_tag: the key tag @type key_tag: int @ivar signer: the signer @type signer: dns.name.Name object @ivar signature: the signature @type signature: string""" __slots__ = ['type_covered', 'algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'key_tag', 'signer', 'signature'] def __init__(self, rdclass, rdtype, type_covered, algorithm, labels, original_ttl, expiration, inception, key_tag, signer, signature): super(SIGBase, self).__init__(rdclass, rdtype) self.type_covered = type_covered self.algorithm = algorithm self.labels = labels self.original_ttl = original_ttl self.expiration = expiration self.inception = inception self.key_tag = key_tag self.signer = signer self.signature = signature def covers(self): return self.type_covered def to_text(self, origin=None, relativize=True, **kw): return '%s %d %d %d %s %s %d %s %s' % ( dns.rdatatype.to_text(self.type_covered), self.algorithm, self.labels, self.original_ttl, posixtime_to_sigtime(self.expiration), posixtime_to_sigtime(self.inception), self.key_tag, self.signer, dns.rdata._base64ify(self.signature) ) def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True): type_covered = dns.rdatatype.from_text(tok.get_string()) algorithm = dns.dnssec.algorithm_from_text(tok.get_string()) labels = tok.get_int() original_ttl = tok.get_ttl() expiration = sigtime_to_posixtime(tok.get_string()) inception = sigtime_to_posixtime(tok.get_string()) key_tag = tok.get_int() signer = tok.get_name() signer = signer.choose_relativity(origin, relativize) chunks = [] while 1: t = tok.get().unescape() if t.is_eol_or_eof(): break if not t.is_identifier(): raise dns.exception.SyntaxError chunks.append(t.value) b64 = ''.join(chunks) 
signature = b64.decode('base64_codec') return cls(rdclass, rdtype, type_covered, algorithm, labels, original_ttl, expiration, inception, key_tag, signer, signature) from_text = classmethod(from_text) def to_wire(self, file, compress = None, origin = None): header = struct.pack('!HBBIIIH', self.type_covered, self.algorithm, self.labels, self.original_ttl, self.expiration, self.inception, self.key_tag) file.write(header) self.signer.to_wire(file, None, origin) file.write(self.signature) def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None): header = struct.unpack('!HBBIIIH', wire[current : current + 18]) current += 18 rdlen -= 18 (signer, cused) = dns.name.from_wire(wire[: current + rdlen], current) current += cused rdlen -= cused if not origin is None: signer = signer.relativize(origin) signature = wire[current : current + rdlen] return cls(rdclass, rdtype, header[0], header[1], header[2], header[3], header[4], header[5], header[6], signer, signature) from_wire = classmethod(from_wire) def choose_relativity(self, origin = None, relativize = True): self.signer = self.signer.choose_relativity(origin, relativize) def _cmp(self, other): hs = struct.pack('!HBBIIIH', self.type_covered, self.algorithm, self.labels, self.original_ttl, self.expiration, self.inception, self.key_tag) ho = struct.pack('!HBBIIIH', other.type_covered, other.algorithm, other.labels, other.original_ttl, other.expiration, other.inception, other.key_tag) v = cmp(hs, ho) if v == 0: v = cmp(self.signer, other.signer) if v == 0: v = cmp(self.signature, other.signature) return v
bsd-3-clause
5,038,593,685,260,407,000
36.791667
79
0.603087
false
marcosmodesto/django-testapp
django/contrib/gis/geos/coordseq.py
411
5396
""" This module houses the GEOSCoordSeq object, which is used internally by GEOSGeometry to house the actual coordinates of the Point, LineString, and LinearRing geometries. """ from ctypes import c_double, c_uint, byref from django.contrib.gis.geos.base import GEOSBase, numpy from django.contrib.gis.geos.error import GEOSException, GEOSIndexError from django.contrib.gis.geos.libgeos import CS_PTR from django.contrib.gis.geos import prototypes as capi class GEOSCoordSeq(GEOSBase): "The internal representation of a list of coordinates inside a Geometry." ptr_type = CS_PTR #### Python 'magic' routines #### def __init__(self, ptr, z=False): "Initializes from a GEOS pointer." if not isinstance(ptr, CS_PTR): raise TypeError('Coordinate sequence should initialize with a CS_PTR.') self._ptr = ptr self._z = z def __iter__(self): "Iterates over each point in the coordinate sequence." for i in xrange(self.size): yield self[i] def __len__(self): "Returns the number of points in the coordinate sequence." return int(self.size) def __str__(self): "Returns the string representation of the coordinate sequence." return str(self.tuple) def __getitem__(self, index): "Returns the coordinate sequence value at the given index." coords = [self.getX(index), self.getY(index)] if self.dims == 3 and self._z: coords.append(self.getZ(index)) return tuple(coords) def __setitem__(self, index, value): "Sets the coordinate sequence value at the given index." # Checking the input value if isinstance(value, (list, tuple)): pass elif numpy and isinstance(value, numpy.ndarray): pass else: raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).') # Checking the dims of the input if self.dims == 3 and self._z: n_args = 3 set_3d = True else: n_args = 2 set_3d = False if len(value) != n_args: raise TypeError('Dimension of value does not match.') # Setting the X, Y, Z self.setX(index, value[0]) self.setY(index, value[1]) if set_3d: self.setZ(index, value[2]) #### Internal Routines #### def _checkindex(self, index): "Checks the given index." sz = self.size if (sz < 1) or (index < 0) or (index >= sz): raise GEOSIndexError('invalid GEOS Geometry index: %s' % str(index)) def _checkdim(self, dim): "Checks the given dimension." if dim < 0 or dim > 2: raise GEOSException('invalid ordinate dimension "%d"' % dim) #### Ordinate getting and setting routines #### def getOrdinate(self, dimension, index): "Returns the value for the given dimension and index." self._checkindex(index) self._checkdim(dimension) return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double())) def setOrdinate(self, dimension, index, value): "Sets the value for the given dimension and index." self._checkindex(index) self._checkdim(dimension) capi.cs_setordinate(self.ptr, index, dimension, value) def getX(self, index): "Get the X value at the index." return self.getOrdinate(0, index) def setX(self, index, value): "Set X with the value at the given index." self.setOrdinate(0, index, value) def getY(self, index): "Get the Y value at the given index." return self.getOrdinate(1, index) def setY(self, index, value): "Set Y with the value at the given index." self.setOrdinate(1, index, value) def getZ(self, index): "Get Z with the value at the given index." return self.getOrdinate(2, index) def setZ(self, index, value): "Set Z with the value at the given index." self.setOrdinate(2, index, value) ### Dimensions ### @property def size(self): "Returns the size of this coordinate sequence." 
return capi.cs_getsize(self.ptr, byref(c_uint())) @property def dims(self): "Returns the dimensions of this coordinate sequence." return capi.cs_getdims(self.ptr, byref(c_uint())) @property def hasz(self): """ Returns whether this coordinate sequence is 3D. This property value is inherited from the parent Geometry. """ return self._z ### Other Methods ### def clone(self): "Clones this coordinate sequence." return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz) @property def kml(self): "Returns the KML representation for the coordinates." # Getting the substitution string depending on whether the coordinates have # a Z dimension. if self.hasz: substr = '%s,%s,%s ' else: substr = '%s,%s,0 ' return '<coordinates>%s</coordinates>' % \ ''.join([substr % self[i] for i in xrange(len(self))]).strip() @property def tuple(self): "Returns a tuple version of this coordinate sequence." n = self.size if n == 1: return self[0] else: return tuple([self[i] for i in xrange(n)])
bsd-3-clause
-9,033,275,954,397,161,000
33.589744
97
0.611379
false
faust64/ansible
lib/ansible/modules/cloud/webfaction/webfaction_db.py
19
6415
#!/usr/bin/python # # Create a webfaction database using Ansible and the Webfaction API # # ------------------------------------------ # # (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from: # * Andy Baker # * Federico Tarantini # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: webfaction_db short_description: Add or remove a database on Webfaction description: - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction. author: Quentin Stafford-Fraser (@quentinsf) version_added: "2.0" notes: - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays." - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info. options: name: description: - The name of the database required: true state: description: - Whether the database should exist required: false choices: ['present', 'absent'] default: "present" type: description: - The type of database to create. required: true choices: ['mysql', 'postgresql'] password: description: - The password for the new database user. required: false default: None login_name: description: - The webfaction account to use required: true login_password: description: - The webfaction password to use required: true machine: description: - The machine name to use (optional for accounts with only one machine) required: false ''' EXAMPLES = ''' # This will also create a default DB user with the same # name as the database, and the specified password. - name: Create a database webfaction_db: name: "{{webfaction_user}}_db1" password: mytestsql type: mysql login_name: "{{webfaction_user}}" login_password: "{{webfaction_passwd}}" machine: "{{webfaction_machine}}" # Note that, for symmetry's sake, deleting a database using # 'state: absent' will also delete the matching user. ''' import socket import xmlrpclib webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/') def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), state = dict(required=False, choices=['present', 'absent'], default='present'), # You can specify an IP address or hostname. 
type = dict(required=True), password = dict(required=False, default=None, no_log=True), login_name = dict(required=True), login_password = dict(required=True, no_log=True), machine = dict(required=False, default=False), ), supports_check_mode=True ) db_name = module.params['name'] db_state = module.params['state'] db_type = module.params['type'] db_passwd = module.params['password'] if module.params['machine']: session_id, account = webfaction.login( module.params['login_name'], module.params['login_password'], module.params['machine'] ) else: session_id, account = webfaction.login( module.params['login_name'], module.params['login_password'] ) db_list = webfaction.list_dbs(session_id) db_map = dict([(i['name'], i) for i in db_list]) existing_db = db_map.get(db_name) user_list = webfaction.list_db_users(session_id) user_map = dict([(i['username'], i) for i in user_list]) existing_user = user_map.get(db_name) result = {} # Here's where the real stuff happens if db_state == 'present': # Does a database with this name already exist? if existing_db: # Yes, but of a different type - fail if existing_db['db_type'] != db_type: module.fail_json(msg="Database already exists but is a different type. Please fix by hand.") # If it exists with the right type, we don't change anything. module.exit_json( changed = False, ) if not module.check_mode: # If this isn't a dry run, create the db # and default user. result.update( webfaction.create_db( session_id, db_name, db_type, db_passwd ) ) elif db_state == 'absent': # If this isn't a dry run... if not module.check_mode: if not (existing_db or existing_user): module.exit_json(changed = False,) if existing_db: # Delete the db if it exists result.update( webfaction.delete_db(session_id, db_name, db_type) ) if existing_user: # Delete the default db user if it exists result.update( webfaction.delete_db_user(session_id, db_name, db_type) ) else: module.fail_json(msg="Unknown state specified: {}".format(db_state)) module.exit_json( changed = True, result = result ) from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
6,204,298,063,020,600,000
30.292683
353
0.602027
false
DistributedSystemsGroup/zoe
zoe_api/web/executions.py
1
5923
# Copyright (c) 2017, Daniele Venzano # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Web pages and functions related to executions.""" import datetime import json import math import time from zoe_lib.config import get_conf import zoe_api.exceptions from zoe_api.web.request_handler import ZoeWebRequestHandler class ExecutionStartWeb(ZoeWebRequestHandler): """Handler class""" def post(self): """Start an execution.""" if self.current_user is None: return app_descr_json = self.request.files['file'][0]['body'].decode('utf-8') app_descr = json.loads(app_descr_json) exec_name = self.get_argument('exec_name') try: new_id = self.api_endpoint.execution_start(self.current_user, exec_name, app_descr) except zoe_api.exceptions.ZoeException as e: self.error_page(error_message=e.message) return self.redirect(self.reverse_url('execution_inspect', new_id)) class ExecutionListWeb(ZoeWebRequestHandler): """Handler class""" PAGINATION_ITEM_COUNT = 50 def get(self, page=0): """Home page with authentication.""" if self.current_user is None: return page = int(page) executions_count = self.api_endpoint.execution_count(self.current_user) executions = self.api_endpoint.execution_list(self.current_user, base=page*self.PAGINATION_ITEM_COUNT, limit=self.PAGINATION_ITEM_COUNT) template_vars = { "user": self.current_user, 'executions': sorted(executions, key=lambda e: e.id, reverse=True), 'current_page': page, 'max_page': math.ceil(executions_count / self.PAGINATION_ITEM_COUNT), 'last_page': len(executions) < self.PAGINATION_ITEM_COUNT } self.render('execution_list.jinja2', **template_vars) class ExecutionRestartWeb(ZoeWebRequestHandler): """Handler class""" def get(self, execution_id: int): """Restart an already defined (and not running) execution.""" if self.current_user is None: return try: e = self.api_endpoint.execution_by_id(self.current_user, execution_id) new_id = self.api_endpoint.execution_start(self.current_user, e.name, e.description) except zoe_api.exceptions.ZoeException as e: self.error_page(error_message=e.message) return self.redirect(self.reverse_url('execution_inspect', new_id)) class ExecutionTerminateWeb(ZoeWebRequestHandler): """Handler class""" def get(self, execution_id: int): """Terminate an execution.""" if self.current_user is None: return try: self.api_endpoint.execution_terminate(self.current_user, execution_id, 'user {} request from web interface'.format(self.current_user.username)) except zoe_api.exceptions.ZoeException as e: self.set_status(e.status_code, e.message) return self.redirect(self.reverse_url('home_user')) class ExecutionInspectWeb(ZoeWebRequestHandler): """Handler class""" def get(self, execution_id): """Gather details about an execution.""" if self.current_user is None: return try: e = self.api_endpoint.execution_by_id(self.current_user, execution_id) except zoe_api.exceptions.ZoeException as ex: self.set_status(ex.status_code, ex.message) return services_info, endpoints = self.api_endpoint.execution_endpoints(self.current_user, e) template_vars = { "e": e, "services_info": 
services_info, "endpoints": endpoints, 'killed_at': e.time_submit + datetime.timedelta(hours=e.owner.quota.runtime_limit) } if get_conf().enable_plots and e.time_start is not None: grafana_url_template = 'https://cloud-platform.eurecom.fr/grafana/dashboard/db/zoe-executions?orgId=1&from={}&to={}&var-execution_id={}&refresh=1y' if e.time_end is None: e_time_end = int(time.time() * 1000) else: e_time_end = int((e.time_end - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1) * 1000) e_time_start = int((e.time_start - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1) * 1000) template_vars['grafana_url'] = grafana_url_template.format(e_time_start, e_time_end, execution_id) self.render('execution_inspect.jinja2', **template_vars) class ServiceLogsWeb(ZoeWebRequestHandler): """Handler class""" def get(self, service_id): """Gather details about an execution.""" if self.current_user is None: return try: service = self.api_endpoint.service_by_id(self.current_user, service_id) except zoe_api.exceptions.ZoeException as e: self.set_status(e.status_code, e.message) return template_vars = { "service": service, "log_path": "{}/{}/{}/{}.txt".format(get_conf().log_url, get_conf().deployment_name, service.execution_id, service.name), "websocket_base": get_conf().websocket_base + get_conf().reverse_proxy_path, 'use_websockets': get_conf().log_use_websockets } self.render('service_logs.jinja2', **template_vars)
apache-2.0
-489,584,597,766,974,660
35.337423
159
0.637684
false
googleapis/python-automl
google/cloud/automl_v1/services/prediction_service/transports/__init__.py
6
1221
# -*- coding: utf-8 -*-

# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import PredictionServiceTransport
from .grpc import PredictionServiceGrpcTransport
from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport

# Compile a registry of transports.
_transport_registry = OrderedDict()  # type: Dict[str, Type[PredictionServiceTransport]]
_transport_registry["grpc"] = PredictionServiceGrpcTransport
_transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport

__all__ = (
    "PredictionServiceTransport",
    "PredictionServiceGrpcTransport",
    "PredictionServiceGrpcAsyncIOTransport",
)
apache-2.0
-367,353,281,493,697,500
36
88
0.782146
false
ChristineLaMuse/mozillians
vendor-local/lib/python/import_export/widgets.py
9
3870
from __future__ import unicode_literals

from decimal import Decimal
from datetime import datetime

try:
    from django.utils.encoding import force_text
except ImportError:
    from django.utils.encoding import force_unicode as force_text


class Widget(object):
    """
    Widget takes care of converting between import and export
    representations.

    Widget objects have two functions:

    * converts object field value to export representation

    * converts import value and converts it to appropriate python
      representation
    """

    def clean(self, value):
        """
        Returns appropriate python objects for import value.
        """
        return value

    def render(self, value):
        """
        Returns export representation of python value.
        """
        return force_text(value)


class IntegerWidget(Widget):
    """
    Widget for converting integer fields.
    """

    def clean(self, value):
        if not value:
            return None
        return int(value)


class DecimalWidget(Widget):
    """
    Widget for converting decimal fields.
    """

    def clean(self, value):
        if not value:
            return None
        return Decimal(value)


class CharWidget(Widget):
    """
    Widget for converting text fields.
    """

    def render(self, value):
        return force_text(value)


class BooleanWidget(Widget):
    """
    Widget for converting boolean fields.
    """
    TRUE_VALUES = ["1", 1]
    FALSE_VALUE = "0"

    def render(self, value):
        return self.TRUE_VALUES[0] if value else self.FALSE_VALUE

    def clean(self, value):
        return True if value in self.TRUE_VALUES else False


class DateWidget(Widget):
    """
    Widget for converting date fields.

    Takes optional ``format`` parameter.
    """

    def __init__(self, format=None):
        if format is None:
            format = "%Y-%m-%d"
        self.format = format

    def clean(self, value):
        if not value:
            return None
        return datetime.strptime(value, self.format).date()

    def render(self, value):
        return value.strftime(self.format)


class DateTimeWidget(Widget):
    """
    Widget for converting date fields.

    Takes optional ``format`` parameter.
    """

    def __init__(self, format=None):
        if format is None:
            format = "%Y-%m-%d %H:%M:%S"
        self.format = format

    def clean(self, value):
        if not value:
            return None
        return datetime.strptime(value, self.format)

    def render(self, value):
        return value.strftime(self.format)


class ForeignKeyWidget(Widget):
    """
    Widget for ``ForeignKey`` model field that represent ForeignKey as
    integer value.

    Requires a positional argument: the class to which the field is related.
    """

    def __init__(self, model, *args, **kwargs):
        self.model = model
        super(ForeignKeyWidget, self).__init__(*args, **kwargs)

    def clean(self, value):
        pk = super(ForeignKeyWidget, self).clean(value)
        return self.model.objects.get(pk=pk) if pk else None

    def render(self, value):
        if value is None:
            return ""
        return value.pk


class ManyToManyWidget(Widget):
    """
    Widget for ``ManyToManyField`` model field that represent m2m field as
    comma separated pk values.

    Requires a positional argument: the class to which the field is related.
    """

    def __init__(self, model, *args, **kwargs):
        self.model = model
        super(ManyToManyWidget, self).__init__(*args, **kwargs)

    def clean(self, value):
        if not value:
            return self.model.objects.none()
        ids = value.split(",")
        return self.model.objects.filter(pk__in=ids)

    def render(self, value):
        ids = [str(obj.pk) for obj in value.all()]
        return ",".join(ids)
bsd-3-clause
7,886,282,879,968,031,000
22.454545
78
0.614729
false
alfredoavanzosc/odoomrp-wip-1
mrp_operations_start_without_material/__openerp__.py
11
1438
# -*- encoding: utf-8 -*-
##############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
    'name': 'MRP Operations start without material',
    'version': '1.0',
    'author': 'OdooMRP team',
    'contributors': ["Daniel Campos <[email protected]>",
                     "Pedro M. Baeza <[email protected]>",
                     "Ana Juaristi <[email protected]>"],
    'website': 'http://www.odoomrp.com',
    "depends": ['mrp_operations_extension'],
    "category": "Manufacturing",
    "data": ['views/mrp_routing_view.xml',
             'views/mrp_production_view.xml'
             ],
    "installable": True,
    "application": True
}
agpl-3.0
-1,368,945,667,069,260,500
40.085714
78
0.580668
false
asphalt-framework/asphalt-web
asphalt/web/servers/http.py
1
3331
import logging from asyncio import ( start_unix_server, start_server, StreamReader, StreamWriter) from ssl import SSLContext from typing import Union from urllib.parse import urlparse, unquote import h11 from multidict import CIMultiDict from typeguard import check_argument_types from asphalt.core import Context from asphalt.web.request import HTTPRequest from asphalt.web.servers.base import BaseWebServer logger = logging.getLogger(__name__) class HTTPServer(BaseWebServer): """ Serves HTTP requests using HTTP/1.1. TLS support can be enabled by passing ``tls_context`` equipped with a key and certificate. :param tls_context: an :class:`~ssl.SSLContext` instance or the resource name of one :param kwargs: keyword arguments passed to :class:`~asphalt.web.servers.base.BaseWebServer` """ def __init__(self, tls_context: Union[SSLContext, str] = None, **kwargs): super().__init__(**kwargs) assert check_argument_types() self.tls_context = tls_context self._server = None async def start(self, parent_ctx: Context) -> None: await super().start(parent_ctx) if isinstance(self.tls_context, str): self.tls_context = await parent_ctx.request_resource(SSLContext, self.tls_context) if self.socket_path: self._server = start_unix_server( self.handle_client, self.socket_path, backlog=self.backlog, ssl=self.tls_context) else: self._server = start_server( self.handle_client, self.host, self.port, backlog=self.backlog, ssl=self.tls_context) async def shutdown(self) -> None: # Stop accepting new connections self._server.close() await self._server.wait_closed() # Wait until the existing requests have been processed if self.clients: logger.info('Waiting for %d requests to finish', len(self.clients)) for protocol in self.clients: await protocol.wait_finish() async def handle_client(self, reader: StreamReader, writer: StreamWriter) -> None: connection = h11.Connection(h11.SERVER) body = None # type: StreamReader while True: data = await reader.read(65536) connection.receive_data(data) event = connection.next_event() if event is h11.NEED_DATA: continue elif isinstance(event, h11.Request): headers = CIMultiDict((key.decode('ascii'), value.decode('iso-8859-1')) for key, value in event.headers) peername = writer.get_extra_info('peername') peercert = writer.get_extra_info('peercert') parsed = urlparse(event.target, allow_fragments=False) query = unquote(parsed.query.decode('ascii')) request = HTTPRequest( event.http_version.decode('ascii'), event.method.decode('ascii'), parsed.path.decode('utf-8'), query, headers, body, bool(self.tls_context), peername, peercert) elif isinstance(event, h11.Data): body.feed_data(event.data) elif isinstance(event, h11.EndOfMessage): body.feed_eof()
apache-2.0
7,885,006,894,332,097,000
39.13253
97
0.623536
false
mengxn/tensorflow
tensorflow/contrib/distributions/python/ops/transformed_distribution.py
8
22377
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A Transformed Distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.distributions.python.ops import distribution as distributions from tensorflow.contrib.distributions.python.ops import distribution_util from tensorflow.contrib.distributions.python.ops.bijectors import identity as identity_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops __all__ = [ "TransformedDistribution", ] # The following helper functions attempt to statically perform a TF operation. # These functions make debugging easier since we can do more validation during # graph construction. def _static_value(x): """Returns the static value of a `Tensor` or `None`.""" return tensor_util.constant_value(ops.convert_to_tensor(x)) def _logical_and(*args): """Convenience function which attempts to statically `reduce_all`.""" args_ = [_static_value(x) for x in args] if any(x is not None and not bool(x) for x in args_): return constant_op.constant(False) if all(x is not None and bool(x) for x in args_): return constant_op.constant(True) if len(args) == 2: return math_ops.logical_and(*args) return math_ops.reduce_all(args) def _logical_equal(x, y): """Convenience function which attempts to statically compute `x == y`.""" x_ = _static_value(x) y_ = _static_value(y) if x_ is None or y_ is None: return math_ops.equal(x, y) return constant_op.constant(np.array_equal(x_, y_)) def _logical_not(x): """Convenience function which attempts to statically apply `logical_not`.""" x_ = _static_value(x) if x_ is None: return math_ops.logical_not(x) return constant_op.constant(np.logical_not(x_)) def _concat_vectors(*args): """Convenience function which concatenates input vectors.""" args_ = [_static_value(x) for x in args] if any(x_ is None for x_ in args_): return array_ops.concat(args, 0) return constant_op.constant([x_ for vec_ in args_ for x_ in vec_]) def _pick_scalar_condition(pred, cond_true, cond_false): """Convenience function which chooses the condition based on the predicate.""" # Note: This function is only valid if all of pred, cond_true, and cond_false # are scalars. This means its semantics are arguably more like tf.cond than # tf.select even though we use tf.select to implement it. 
pred_ = _static_value(pred) if pred_ is None: return array_ops.where(pred, cond_true, cond_false) return cond_true if pred_ else cond_false def _ones_like(x): """Convenience function attempts to statically construct `ones_like`.""" # Should only be used for small vectors. if x.get_shape().is_fully_defined(): return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype) return array_ops.ones_like(x) def _ndims_from_shape(shape): """Returns `Tensor`'s `rank` implied by a `Tensor` shape.""" if shape.get_shape().ndims not in (None, 1): raise ValueError("input is not a valid shape: not 1D") if not shape.dtype.is_integer: raise TypeError("input is not a valid shape: wrong dtype") if shape.get_shape().is_fully_defined(): return constant_op.constant(shape.get_shape().as_list()[0]) return array_ops.shape(shape)[0] def _is_scalar_from_shape(shape): """Returns `True` `Tensor` if `Tensor` shape implies a scalar.""" return _logical_equal(_ndims_from_shape(shape), 0) class TransformedDistribution(distributions.Distribution): """A Transformed Distribution. A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`, and a deterministic, invertible, differentiable transform, `Y = g(X)`. The transform is typically an instance of the `Bijector` class and the base distribution is typically an instance of the `Distribution` class. A `Bijector` is expected to implement the following functions: - `forward`, - `inverse`, - `inverse_log_det_jacobian`. The semantics of these functions are outlined in the `Bijector` documentation. We now describe how a `TransformedDistribution` alters the input/outputs of a `Distribution` associated with a random variable (rv) `X`. Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function of random variable `Y`; write the probability density function `pdf(Y=y) := d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative wrt to `Y` evaluated at `y`. Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism, i.e., a non-random, continuous, differentiable, and invertible function. Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian of `g` evaluated at `x`. A `TransformedDistribution` implements the following operations: * `sample`: Mathematically: ```none Y = g(X) ``` Programmatically: ```python return bijector.forward(distribution.sample(...)) ``` * `log_prob`: Mathematically: ```none (log o pdf)(Y=y) = (log o pdf o g^{-1})(y) + (log o abs o det o J o g^{-1})(y) ``` Programmatically: ```python return (distribution.log_prob(bijector.inverse(y)) + bijector.inverse_log_det_jacobian(y)) ``` * `log_cdf`: Mathematically: ```none (log o cdf)(Y=y) = (log o cdf o g^{-1})(y) ``` Programmatically: ```python return distribution.log_cdf(bijector.inverse(x)) ``` * and similarly for: `cdf`, `prob`, `log_survival_function`, `survival_function`. 
A simple example constructing a Log-Normal distribution from a Normal distribution: ```python ds = tf.contrib.distributions log_normal = ds.TransformedDistribution( distribution=ds.Normal(loc=mu, scale=sigma), bijector=ds.bijectors.Exp(), name="LogNormalTransformedDistribution") ``` A `LogNormal` made from callables: ```python ds = tf.contrib.distributions log_normal = ds.TransformedDistribution( distribution=ds.Normal(loc=mu, scale=sigma), bijector=ds.bijectors.Inline( forward_fn=tf.exp, inverse_fn=tf.log, inverse_log_det_jacobian_fn=( lambda y: -tf.reduce_sum(tf.log(y), axis=-1)), name="LogNormalTransformedDistribution") ``` Another example constructing a Normal from a StandardNormal: ```python ds = tf.contrib.distributions normal = ds.TransformedDistribution( distribution=ds.Normal(loc=0, scale=1), bijector=ds.bijectors.ScaleAndShift(loc=mu, scale=sigma, event_ndims=0), name="NormalTransformedDistribution") ``` A `TransformedDistribution`'s batch- and event-shape are implied by the base distribution unless explicitly overridden by `batch_shape` or `event_shape` arguments. Specifying an overriding `batch_shape` (`event_shape`) is permitted only if the base distribution has scalar batch-shape (event-shape). The bijector is applied to the distribution as if the distribution possessed the overridden shape(s). The following example demonstrates how to construct a multivariate Normal as a `TransformedDistribution`. ```python bs = tf.contrib.distributions.bijector ds = tf.contrib.distributions # We will create two MVNs with batch_shape = event_shape = 2. mean = [[-1., 0], # batch:0 [0., 1]] # batch:1 chol_cov = [[[1., 0], [0, 1]], # batch:0 [[1, 0], [2, 2]]] # batch:1 mvn1 = ds.TransformedDistribution( distribution=ds.Normal(loc=0., scale=1.), bijector=bs.Affine(shift=mean, tril=chol_cov), batch_shape=[2], # Valid because base_distribution.batch_shape == []. event_shape=[2]) # Valid because base_distribution.event_shape == []. mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov) # mvn1.log_prob(x) == mvn2.log_prob(x) ``` """ def __init__(self, distribution, bijector=None, batch_shape=None, event_shape=None, validate_args=False, name=None): """Construct a Transformed Distribution. Args: distribution: The base distribution instance to transform. Typically an instance of `Distribution`. bijector: The object responsible for calculating the transformation. Typically an instance of `Bijector`. `None` means `Identity()`. batch_shape: `integer` vector `Tensor` which overrides `distribution` `batch_shape`; valid only if `distribution.is_scalar_batch()`. event_shape: `integer` vector `Tensor` which overrides `distribution` `event_shape`; valid only if `distribution.is_scalar_event()`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. name: Python `str` name prefixed to Ops created by this class. Default: `bijector.name + distribution.name`. """ parameters = locals() name = name or (("" if bijector is None else bijector.name) + distribution.name) with ops.name_scope(name, values=[event_shape, batch_shape]): # For convenience we define some handy constants. 
self._zero = constant_op.constant(0, dtype=dtypes.int32, name="zero") self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty") if bijector is None: bijector = identity_lib.Identity(validate_args=validate_args) # We will keep track of a static and dynamic version of # self._is_{batch,event}_override. This way we can do more prior to graph # execution, including possibly raising Python exceptions. self._override_batch_shape = self._maybe_validate_shape_override( batch_shape, distribution.is_scalar_batch(), validate_args, "batch_shape") self._is_batch_override = _logical_not(_logical_equal( _ndims_from_shape(self._override_batch_shape), self._zero)) self._is_maybe_batch_override = bool( tensor_util.constant_value(self._override_batch_shape) is None or tensor_util.constant_value(self._override_batch_shape).size != 0) self._override_event_shape = self._maybe_validate_shape_override( event_shape, distribution.is_scalar_event(), validate_args, "event_shape") self._is_event_override = _logical_not(_logical_equal( _ndims_from_shape(self._override_event_shape), self._zero)) self._is_maybe_event_override = bool( tensor_util.constant_value(self._override_event_shape) is None or tensor_util.constant_value(self._override_event_shape).size != 0) # To convert a scalar distribution into a multivariate distribution we # will draw dims from the sample dims, which are otherwise iid. This is # easy to do except in the case that the base distribution has batch dims # and we're overriding event shape. When that case happens the event dims # will incorrectly be to the left of the batch dims. In this case we'll # cyclically permute left the new dims. self._needs_rotation = _logical_and( self._is_event_override, _logical_not(self._is_batch_override), _logical_not(distribution.is_scalar_batch())) override_event_ndims = _ndims_from_shape(self._override_event_shape) self._rotate_ndims = _pick_scalar_condition( self._needs_rotation, override_event_ndims, 0) # We'll be reducing the head dims (if at all), i.e., this will be [] # if we don't need to reduce. self._reduce_event_indices = math_ops.range( self._rotate_ndims - override_event_ndims, self._rotate_ndims) self._distribution = distribution self._bijector = bijector super(TransformedDistribution, self).__init__( dtype=self._distribution.dtype, reparameterization_type=self._distribution.reparameterization_type, validate_args=validate_args, allow_nan_stats=self._distribution.allow_nan_stats, parameters=parameters, # We let TransformedDistribution access _graph_parents since this class # is more like a baseclass than derived. graph_parents=(distribution._graph_parents + # pylint: disable=protected-access bijector.graph_parents), name=name) @property def distribution(self): """Base distribution, p(x).""" return self._distribution @property def bijector(self): """Function transforming x => y.""" return self._bijector def _event_shape_tensor(self): return self.bijector.forward_event_shape_tensor( distribution_util.pick_vector( self._is_event_override, self._override_event_shape, self.distribution.event_shape_tensor())) def _event_shape(self): # If there's a chance that the event_shape has been overriden, we return # what we statically know about the `event_shape_override`. This works # because: `_is_maybe_event_override` means `static_override` is `None` or a # non-empty list, i.e., we don't statically know the `event_shape` or we do. # # Since the `bijector` may change the `event_shape`, we then forward what we # know to the bijector. 
This allows the `bijector` to have final say in the # `event_shape`. static_override = tensor_util.constant_value(self._override_event_shape) return self.bijector.forward_event_shape( tensor_shape.TensorShape(static_override) if self._is_maybe_event_override else self.distribution.event_shape) def _batch_shape_tensor(self): return distribution_util.pick_vector( self._is_batch_override, self._override_batch_shape, self.distribution.batch_shape_tensor()) def _batch_shape(self): # If there's a chance that the batch_shape has been overriden, we return # what we statically know about the `batch_shape_override`. This works # because: `_is_maybe_batch_override` means `static_override` is `None` or a # non-empty list, i.e., we don't statically know the `batch_shape` or we do. # # Notice that this implementation parallels the `_event_shape` except that # the `bijector` doesn't get to alter the `batch_shape`. Recall that # `batch_shape` is a property of a distribution while `event_shape` is # shared between both the `distribution` instance and the `bijector`. static_override = tensor_util.constant_value(self._override_batch_shape) return (tensor_shape.TensorShape(static_override) if self._is_maybe_batch_override else self.distribution.batch_shape) def _sample_n(self, n, seed=None): sample_shape = _concat_vectors( distribution_util.pick_vector(self._needs_rotation, self._empty, [n]), self._override_batch_shape, self._override_event_shape, distribution_util.pick_vector(self._needs_rotation, [n], self._empty)) x = self.distribution.sample(sample_shape=sample_shape, seed=seed) x = self._maybe_rotate_dims(x) return self.bijector.forward(x) def _log_prob(self, y): x = self.bijector.inverse(y) ildj = self.bijector.inverse_log_det_jacobian(y) x = self._maybe_rotate_dims(x, rotate_right=True) log_prob = self.distribution.log_prob(x) if self._is_maybe_event_override: log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices) log_prob = ildj + log_prob if self._is_maybe_event_override: log_prob.set_shape(array_ops.broadcast_static_shape( y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape)) return log_prob def _prob(self, y): x = self.bijector.inverse(y) ildj = self.bijector.inverse_log_det_jacobian(y) x = self._maybe_rotate_dims(x, rotate_right=True) prob = self.distribution.prob(x) if self._is_maybe_event_override: prob = math_ops.reduce_prod(prob, self._reduce_event_indices) prob *= math_ops.exp(ildj) if self._is_maybe_event_override: prob.set_shape(array_ops.broadcast_static_shape( y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape)) return prob def _log_cdf(self, y): if self._is_maybe_event_override: raise NotImplementedError("log_cdf is not implemented when overriding " "event_shape") x = self.bijector.inverse(y) return self.distribution.log_cdf(x) def _cdf(self, y): if self._is_maybe_event_override: raise NotImplementedError("cdf is not implemented when overriding " "event_shape") x = self.bijector.inverse(y) return self.distribution.cdf(x) def _log_survival_function(self, y): if self._is_maybe_event_override: raise NotImplementedError("log_survival_function is not implemented when " "overriding event_shape") x = self.bijector.inverse(y) return self.distribution.log_survival_function(x) def _survival_function(self, y): if self._is_maybe_event_override: raise NotImplementedError("survival_function is not implemented when " "overriding event_shape") x = self.bijector.inverse(y) return self.distribution.survival_function(x) def _entropy(self): if not 
self.bijector.is_constant_jacobian: raise NotImplementedError("entropy is not implemented") # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It # can be shown that: # H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)]. # If is_constant_jacobian then: # E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c) # where c can by anything. entropy = self.distribution.entropy() if self._is_maybe_event_override: # H[X] = sum_i H[X_i] if X_i are mutually independent. # This means that a reduce_sum is a simple rescaling. entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape), dtype=entropy.dtype.base_dtype) if self._is_maybe_batch_override: new_shape = array_ops.concat([ _ones_like(self._override_batch_shape), self.distribution.batch_shape_tensor() ], 0) entropy = array_ops.reshape(entropy, new_shape) multiples = array_ops.concat([ self._override_batch_shape, _ones_like(self.distribution.batch_shape_tensor()) ], 0) entropy = array_ops.tile(entropy, multiples) dummy = array_ops.zeros([], self.dtype) entropy -= self.bijector.inverse_log_det_jacobian(dummy) entropy.set_shape(self.batch_shape) return entropy def _maybe_validate_shape_override(self, override_shape, base_is_scalar, validate_args, name): """Helper to __init__ which ensures override batch/event_shape are valid.""" if override_shape is None: override_shape = [] override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32, name=name) if not override_shape.dtype.is_integer: raise TypeError("shape override must be an integer") override_is_scalar = _is_scalar_from_shape(override_shape) if tensor_util.constant_value(override_is_scalar): return self._empty dynamic_assertions = [] if override_shape.get_shape().ndims is not None: if override_shape.get_shape().ndims != 1: raise ValueError("shape override must be a vector") elif validate_args: dynamic_assertions += [check_ops.assert_rank( override_shape, 1, message="shape override must be a vector")] if tensor_util.constant_value(override_shape) is not None: if any(s <= 0 for s in tensor_util.constant_value(override_shape)): raise ValueError("shape override must have positive elements") elif validate_args: dynamic_assertions += [check_ops.assert_positive( override_shape, message="shape override must have positive elements")] is_both_nonscalar = _logical_and(_logical_not(base_is_scalar), _logical_not(override_is_scalar)) if tensor_util.constant_value(is_both_nonscalar) is not None: if tensor_util.constant_value(is_both_nonscalar): raise ValueError("base distribution not scalar") elif validate_args: dynamic_assertions += [check_ops.assert_equal( is_both_nonscalar, False, message="base distribution not scalar")] if not dynamic_assertions: return override_shape return control_flow_ops.with_dependencies( dynamic_assertions, override_shape) def _maybe_rotate_dims(self, x, rotate_right=False): """Helper which rolls left event_dims left or right event_dims right.""" needs_rotation_const = tensor_util.constant_value(self._needs_rotation) if needs_rotation_const is not None and not needs_rotation_const: return x ndims = array_ops.rank(x) n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims return array_ops.transpose( x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
apache-2.0
-4,639,215,993,770,167,000
39.318919
90
0.668901
false
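The docstring in the record above states the change-of-variables rule that `log_prob` implements. That rule can be checked numerically for the Exp bijector (the LogNormal construction) with plain NumPy/SciPy, independent of TensorFlow; the `mu`, `sigma`, and `y` values below are arbitrary examples.

```python
# log pdf_Y(y) = log pdf_X(g^{-1}(y)) + log |det J_{g^{-1}}(y)|
# For g = exp: g^{-1} = log and the inverse log-det-Jacobian at y is -log(y).
import numpy as np
from scipy import stats

mu, sigma = 0.3, 1.2
y = np.array([0.5, 1.0, 2.5])

# Right-hand side: base Normal evaluated at x = log(y), plus -log(y).
transformed = stats.norm(loc=mu, scale=sigma).logpdf(np.log(y)) - np.log(y)

# Reference: SciPy's own log-normal density with the matching parameterization.
reference = stats.lognorm(s=sigma, scale=np.exp(mu)).logpdf(y)

assert np.allclose(transformed, reference)
print(transformed)
```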
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/production_ml/labs/samples/contrib/azure-samples/kfp-azure-databricks/databricks/_cluster_op.py
3
10940
import json from kfp.dsl import ResourceOp class CreateClusterOp(ResourceOp): """Represents an Op which will be translated into a Databricks Cluster creation resource template. Examples: import databricks databricks.CreateClusterOp( name="createcluster", cluster_name="test-cluster", spec={ "spark_version":"5.3.x-scala2.11", "node_type_id": "Standard_D3_v2", "spark_conf": { "spark.speculation": "true" }, "num_workers": 2 } ) databricks.CreateClusterOp( name="createcluster", cluster_name="test-cluster", spark_version="5.3.x-scala2.11", node_type_id="Standard_D3_v2", spark_conf={ "spark.speculation": "true" }, num_workers=2 ) databricks.CreateClusterOp( name="createcluster", cluster_name="test-cluster", spark_version="5.3.x-scala2.11", node_type_id="Standard_D3_v2", autoscale={ "min_workers": 2, "max_workers": 50 } ) """ def __init__(self, name: str = None, k8s_name: str = None, cluster_name: str = None, spec: {} = None, num_workers: int = None, autoscale: {} = None, spark_version: str = None, spark_conf: {} = None, node_type_id: str = None, driver_node_type_id: str = None, custom_tags: {} = None, cluster_log_conf: {} = None, init_scripts: {} = None, spark_env_vars: {} = None, autotermination_minutes: int = None, instance_pool_id: str = None): """Create a new instance of CreateClusterOp. Args: name: The name of the pipeline Op. It does not have to be unique within a pipeline because the pipeline will generate a new unique name in case of a conflict. k8s_name = The name of the k8s resource which will be submitted to the cluster. If no k8s_name is provided, cluster_name will be used as the resource name. This name is DNS-1123 subdomain name and must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. cluster_name: Cluster name requested by the user. spec: Full specification of the Databricks cluster to create. num_workers: Number of worker nodes that this cluster should have. autoscale: Parameters needed in order to automatically scale clusters up and down based on load. spark_version: The runtime version of the cluster. spark_conf: An object containing a set of optional, user-specified Spark configuration key-value pairs. node_type_id: This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. driver_node_type_id: The node type of the Spark driver. custom_tags: Additional tags for cluster resources. cluster_log_conf: The configuration for delivering Spark logs to a long-term storage destination. init_scripts: The configuration for storing init scripts. spark_env_vars: An object containing a set of optional, user-specified environment variable key-value pairs. autotermination_minutes: Automatically terminates the cluster after it is inactive for this time in minutes. If not set, this cluster will not be automatically terminated. instance_pool_id: The optional ID of the instance pool to which the cluster belongs. Raises: ValueError: If no k8s resource name or Cluster name are provided. 
""" if not spec: spec = {} if cluster_name: spec["cluster_name"] = cluster_name if num_workers: spec["num_workers"] = num_workers if autoscale: spec["autoscale"] = autoscale if spark_version: spec["spark_version"] = spark_version if spark_conf: spec["spark_conf"] = spark_conf if node_type_id: spec["node_type_id"] = node_type_id if driver_node_type_id: spec["driver_node_type_id"] = driver_node_type_id if custom_tags: spec["custom_tags"] = custom_tags if cluster_log_conf: spec["cluster_log_conf"] = cluster_log_conf if init_scripts: spec["init_scripts"] = init_scripts if spark_env_vars: spec["spark_env_vars"] = spark_env_vars if autotermination_minutes: spec["autotermination_minutes"] = autotermination_minutes if instance_pool_id: spec["instance_pool_id"] = instance_pool_id if not k8s_name and "cluster_name" in spec: k8s_name = spec["cluster_name"] elif not k8s_name: raise ValueError("You need to provide a k8s_name or a cluster_name.") super().__init__( k8s_resource={ "apiVersion": "databricks.microsoft.com/v1alpha1", "kind": "Dcluster", "metadata": { "name": k8s_name, }, "spec": spec, }, action="create", success_condition="status.cluster_info.state in (RUNNING, TERMINATED, UNKNOWN)", attribute_outputs={ "name": "{.metadata.name}", "cluster_id": "{.status.cluster_info.cluster_id}", "cluster_name": "{.status.cluster_info.cluster_name}", "state": "{.status.cluster_info.state}" }, name=name) @classmethod def from_json_spec(cls, name: str = None, k8s_name: str = None, cluster_name: str = None, json_spec: str = None): """Create a new instance of CreateClusterOp from a json specification. Args: name: The name of the pipeline Op. It does not have to be unique within a pipeline because the pipeline will generate a new unique name in case of a conflict. k8s_name = The name of the k8s resource which will be submitted to the cluster. If no k8s_name is provided, cluster_name will be used as the resource name. This name is DNS-1123 subdomain name and must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. cluster_name: Cluster name requested by the user. json_spec: Full specification of the Databricks cluster to create in json format. """ spec = json.loads(json_spec) return cls(name=name, k8s_name=k8s_name, cluster_name=cluster_name, spec=spec) @classmethod def from_file_name(cls, name: str = None, k8s_name: str = None, cluster_name: str = None, file_name: str = None): """Create a new instance of CreateClusterOp from a file with a json specification. Args: name: The name of the pipeline Op. It does not have to be unique within a pipeline because the pipeline will generate a new unique name in case of a conflict. k8s_name = The name of the k8s resource which will be submitted to the cluster. If no k8s_name is provided, cluster_name will be used as the resource name. This name is DNS-1123 subdomain name and must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. cluster_name: Cluster name requested by the user. json_spec_file_name: Name of the file containing the full specification of the Databricks cluster to create in json format. Raises: ValueError: if the file name doesn't exist. """ with open(file_name) as json_file: spec = json.loads(json_file.read()) return cls(name=name, k8s_name=k8s_name, cluster_name=cluster_name, spec=spec) @property def resource(self): """`Resource` object that represents the `resource` property in `io.argoproj.workflow.v1alpha1.Template`. 
""" return self._resource class DeleteClusterOp(ResourceOp): """Represents an Op which will be translated into a Databricks Cluster deletion resource template. Example: import databricks databricks.DeleteClusterOp( name="deletecluster", cluster_name="test-cluster" ) """ def __init__(self, name: str = None, k8s_name: str = None, cluster_name: str = None): """Create a new instance of DeleteClusterOp. Args: name: The name of the pipeline Op. It does not have to be unique within a pipeline because the pipeline will generate a new unique name in case of a conflict. k8s_name = The name of the k8s resource which will be submitted to the cluster. If no k8s_name is provided, cluster_name will be used as the resource name. This name is DNS-1123 subdomain name and must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. cluster_name: The name of the cluster. If k8s_name is provided, this will be ignored. Raises: ValueError: If no k8s resource name or Cluster name are provided. """ k8s_name = k8s_name or cluster_name if not k8s_name: raise ValueError("You need to provide a k8s_name or a cluster_name.") super().__init__( k8s_resource={ "apiVersion": "databricks.microsoft.com/v1alpha1", "kind": "Dcluster", "metadata": { "name": k8s_name } }, action="delete", name=name) @property def resource(self): """`Resource` object that represents the `resource` property in `io.argoproj.workflow.v1alpha1.Template`. """ return self._resource
apache-2.0
-3,990,632,828,487,379,000
38.927007
100
0.557313
false
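The ops in this record are ResourceOps meant to sit inside a Kubeflow Pipelines definition; a rough sketch of how they might be wired together follows. The `databricks` import path mirrors the examples in the docstrings, and the cluster settings are the same placeholder values used there; this is a sketch, not a tested pipeline.

```python
import kfp.dsl as dsl
import databricks


@dsl.pipeline(
    name="databricks-cluster-lifecycle",
    description="Create a Databricks cluster, then tear it down.")
def cluster_pipeline():
    create = databricks.CreateClusterOp(
        name="createcluster",
        cluster_name="test-cluster",
        spark_version="5.3.x-scala2.11",
        node_type_id="Standard_D3_v2",
        num_workers=2)

    delete = databricks.DeleteClusterOp(
        name="deletecluster",
        cluster_name="test-cluster")
    # Only run the delete step once the create resource has succeeded.
    delete.after(create)


# kfp.compiler.Compiler().compile(cluster_pipeline, "cluster_pipeline.yaml")
```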
avedaee/DIRAC
FrameworkSystem/private/logging/backends/RemoteBackend.py
1
2903
# $HeadURL$ __RCSID__ = "$Id$" """This Backend sends the Log Messages to a Log Server It will only report to the server ERROR, EXCEPTION, FATAL and ALWAYS messages. """ import threading import Queue from DIRAC.Core.Utilities import Time, Network from DIRAC.FrameworkSystem.private.logging.backends.BaseBackend import BaseBackend from DIRAC.FrameworkSystem.private.logging.LogLevels import LogLevels class RemoteBackend( BaseBackend, threading.Thread ): def __init__( self, optionsDictionary ): threading.Thread.__init__( self ) self.__interactive = optionsDictionary[ 'Interactive' ] self.__sleep = optionsDictionary[ 'SleepTime' ] self._messageQueue = Queue.Queue() self._Transactions = [] self._alive = True self._site = optionsDictionary[ 'Site' ] self._hostname = Network.getFQDN() self._logLevels = LogLevels() self._negativeLevel = self._logLevels.getLevelValue( 'ERROR' ) self._positiveLevel = self._logLevels.getLevelValue( 'ALWAYS' ) self._maxBundledMessages = 20 self.setDaemon(1) self.start() def doMessage( self, messageObject ): self._messageQueue.put( messageObject ) def run( self ): import time while self._alive: self._bundleMessages() time.sleep( self.__sleep ) def _bundleMessages( self ): while not self._messageQueue.empty(): bundle = [] while ( len( bundle ) < self._maxBundledMessages ) and \ ( not self._messageQueue.empty() ): message = self._messageQueue.get() if self._testLevel( message.getLevel() ): bundle.append( message.toTuple() ) if len( bundle ): self._sendMessageToServer( bundle ) if len( self._Transactions ): self._sendMessageToServer() def _sendMessageToServer( self, messageBundle=None ): from DIRAC.Core.DISET.RPCClient import RPCClient if messageBundle: self._Transactions.append( messageBundle ) TransactionsLength = len( self._Transactions ) if TransactionsLength > 100: del self._Transactions[:TransactionsLength-100] TransactionsLength = 100 try: oSock = RPCClient( "Framework/SystemLogging" ) except Exception,v: return False while TransactionsLength: result = oSock.addMessages( self._Transactions[0], self._site, self._hostname ) if result['OK']: TransactionsLength = TransactionsLength - 1 self._Transactions.pop(0) else: return False return True def _testLevel( self, sLevel ): messageLevel = self._logLevels.getLevelValue( sLevel ) return messageLevel <= self._negativeLevel or \ messageLevel >= self._positiveLevel def flush( self ): self._alive = False if not self.__interactive and self._sendMessageToServer()['OK']: while not self._messageQueue.empty(): self._bundleMessages()
gpl-3.0
-8,650,437,130,664,352,000
31.617978
82
0.666207
false
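A minimal construction sketch for the backend above: the option keys are the ones `__init__` reads, the values are illustrative only, and the import path is taken from this record's path field.

```python
from DIRAC.FrameworkSystem.private.logging.backends.RemoteBackend import RemoteBackend

options = {
    'Interactive': False,       # stored as self.__interactive; controls the final flush behaviour
    'SleepTime': 5,             # seconds run() sleeps between draining the message queue
    'Site': 'LCG.Example.org',  # sent along with the local hostname to Framework/SystemLogging
}

backend = RemoteBackend(options)
# Messages passed to doMessage() are queued, filtered by _testLevel() (only
# ERROR-or-worse and ALWAYS levels survive), bundled in groups of up to 20,
# and shipped through the SystemLogging RPC client.
```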
axinging/chromium-crosswalk
tools/perf/measurements/measurement_smoke_test.py
9
2471
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import optparse import os import unittest from telemetry import benchmark as benchmark_module from telemetry.core import discover from telemetry.internal.browser import browser_options from telemetry.page import legacy_page_test from telemetry.testing import options_for_unittests from telemetry.web_perf import timeline_based_measurement def _GetAllPossiblePageTestInstances(): page_test_instances = [] measurements_dir = os.path.dirname(__file__) top_level_dir = os.path.dirname(measurements_dir) benchmarks_dir = os.path.join(top_level_dir, 'benchmarks') # Get all page test instances from measurement classes that are directly # constructible all_measurement_classes = discover.DiscoverClasses( measurements_dir, top_level_dir, legacy_page_test.LegacyPageTest, index_by_class_name=True, directly_constructable=True).values() for measurement_class in all_measurement_classes: page_test_instances.append(measurement_class()) all_benchmarks_classes = discover.DiscoverClasses( benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values() # Get all page test instances from defined benchmarks. # Note: since this depends on the command line options, there is no guaranteed # that this will generate all possible page test instances but it's worth # enough for smoke test purpose. for benchmark_class in all_benchmarks_classes: options = options_for_unittests.GetCopy() parser = optparse.OptionParser() browser_options.BrowserOptions.AddCommandLineArgs(parser) try: benchmark_class.AddCommandLineArgs(parser) benchmark_module.AddCommandLineArgs(parser) benchmark_class.SetArgumentDefaults(parser) except Exception: logging.error('Exception raised when processing benchmark %s', benchmark_class) raise options.MergeDefaultValues(parser.get_default_values()) pt = benchmark_class().CreatePageTest(options) if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement): page_test_instances.append(pt) return page_test_instances class MeasurementSmokeTest(unittest.TestCase): # Simple smoke test to make sure that all page_test are constructible. def testAllMeasurementInstance(self): _GetAllPossiblePageTestInstances()
bsd-3-clause
-2,954,254,400,516,684,000
38.222222
80
0.76811
false
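Since the module above is an ordinary unittest suite, it can also be run on its own; a hedged sketch follows (the import assumes the working directory is Chromium's tools/perf, and the real waterfall entry point may differ).

```python
import unittest

from measurements import measurement_smoke_test

suite = unittest.defaultTestLoader.loadTestsFromModule(measurement_smoke_test)
unittest.TextTestRunner(verbosity=2).run(suite)
```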
xaowoodenfish/python-1
bigml/tests/delete_project_steps.py
1
1383
# -*- coding: utf-8 -*- #!/usr/bin/env python # # Copyright 2014-2015 BigML # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from datetime import datetime, timedelta from world import world from bigml.api import HTTP_NO_CONTENT, HTTP_OK, HTTP_NOT_FOUND def i_delete_the_project(step): resource = world.api.delete_project(world.project['resource']) world.status = resource['code'] assert world.status == HTTP_NO_CONTENT def wait_until_project_deleted(step, secs): start = datetime.utcnow() project_id = world.project['resource'] resource = world.api.get_project(project_id) while (resource['code'] == HTTP_OK): time.sleep(3) assert datetime.utcnow() - start < timedelta(seconds=int(secs)) resource = world.api.get_project(project_id) assert resource['code'] == HTTP_NOT_FOUND world.projects.remove(project_id)
apache-2.0
2,076,223,942,356,094,500
34.461538
75
0.718727
false
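These step functions lean on a shared `world` object that other modules in bigml/tests set up; below is a rough sketch of the state they expect, with attribute names inferred from the code above rather than from the real world.py.

```python
from bigml.api import BigML


class World(object):
    """Stand-in for the shared test state used by the step functions."""


world = World()
world.api = BigML()  # credentials read from BIGML_USERNAME / BIGML_API_KEY
world.project = world.api.create_project({"name": "step test"})
world.projects = [world.project['resource']]  # resource ids kept for cleanup

# With that state in place:
#   i_delete_the_project(step)            expects HTTP 204 (HTTP_NO_CONTENT)
#   wait_until_project_deleted(step, 30)  polls get_project until HTTP 404, for up to ~30 s
```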