Dataset schema — one record per source file, with the following fields (type and observed range/classes as reported by the dataset viewer):

  repo_name      stringlengths   5 to 92
  path           stringlengths   4 to 232
  copies         stringclasses   19 values
  size           stringlengths   4 to 7
  content        stringlengths   721 to 1.04M
  license        stringclasses   15 values
  hash           int64           -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean      float64         6.51 to 99.9
  line_max       int64           15 to 997
  alpha_frac     float64         0.25 to 0.97
  autogenerated  bool            1 class
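For orientation, here is a minimal, illustrative Python sketch of a single row under this schema, using values taken from the first record below. The dict layout, the keep() helper, and the interpretations of line_mean and alpha_frac in the comments are assumptions for illustration only; they are not defined by the dataset itself.

    # Hypothetical representation of one dataset row as a plain Python dict.
    # Field names follow the schema above; the "content" value is abridged.
    row = {
        "repo_name": "PaddlePaddle/models",
        "path": "PaddleCV/tracking/pytracking/admin/environment.py",
        "copies": "1",
        "size": "2051",                        # stored as a string, per the schema
        "content": "import importlib\nimport os\n# ... full file text ...",
        "license": "apache-2.0",
        "hash": 5295043297320136000,           # int64 fingerprint of the row
        "line_mean": 38.442308,                # presumably the mean line length of the file
        "line_max": 110,                       # presumably the longest line in the file
        "alpha_frac": 0.575329,                # presumably the alphabetic-character fraction
        "autogenerated": False,
    }

    def keep(r, max_line=200, min_alpha=0.3):
        # Illustrative quality filter over the numeric metadata columns.
        return r["line_max"] <= max_line and r["alpha_frac"] >= min_alpha and not r["autogenerated"]

    rows = [row]
    print(sum(1 for r in rows if keep(r)))     # -> 1 for this example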
repo_name:     PaddlePaddle/models
path:          PaddleCV/tracking/pytracking/admin/environment.py
copies:        1
size:          2051
content:
import importlib import os class EnvSettings: def __init__(self): pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) self.results_path = '{}/tracking_results/'.format(pytracking_path) self.network_path = '{}/networks/'.format(pytracking_path) self.dataset_path = '{}/benchmark_datasets/'.format(pytracking_path) def create_default_local_file(): comment = {'results_path': 'Where to store tracking results', 'dataset_path': 'Where benchmark datasets are stored', 'network_path': 'Where tracking networks are stored.'} path = os.path.join(os.path.dirname(__file__), 'local.py') with open(path, 'w') as f: settings = EnvSettings() f.write('from pytracking.admin.environment import EnvSettings\n\n') f.write('def local_env_settings():\n') f.write(' settings = EnvSettings()\n\n') f.write(' # Set your local paths here.\n\n') for attr in dir(settings): comment_str = None if attr in comment: comment_str = comment[attr] attr_val = getattr(settings, attr) if not attr.startswith('__') and not callable(attr_val): if comment_str is None: f.write(' settings.{} = \'{}\'\n'.format(attr, attr_val)) else: f.write(' settings.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str)) f.write('\n return settings\n\n') def env_settings(): env_module_name = 'pytracking.admin.local' try: env_module = importlib.import_module(env_module_name) return env_module.local_env_settings() except: env_file = os.path.join(os.path.dirname(__file__), 'local.py') # Create a default file create_default_local_file() raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. ' 'Then try to run again.'.format(env_file))
license:       apache-2.0
hash:          5,295,043,297,320,136,000
line_mean:     38.442308
line_max:      110
alpha_frac:    0.575329
autogenerated: false

repo_name:     ShaolongHu/Nitrate
path:          tcms/testplans/tests.py
copies:        1
size:          4490
content:
# -*- coding: utf-8 -*- import unittest from django.test.client import Client class PlanTests(unittest.TestCase): def setUp(self): self.c = Client() self.plan_id = 2256 self.status_codes = [301, 302] def test_plans(self): response = self.c.get('/plans/') try: self.assertEquals(response.status_code, 200) except AssertionError: self.assertEquals(response.status_code, 302) def test_plan_new(self): response = self.c.get('/plan/new/') try: self.assertEquals(response.status_code, 200) except AssertionError: self.assertEquals(response.status_code, 302) def test_plan_clone(self): response = self.c.get('/plans/clone/', {'plan_id': self.plan_id}) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_details(self): location = '/plan/%s/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_cases(self): location = '/plan/%s/cases/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_importcase(self): location = '/plan/%s/importcase/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_delete(self): location = '/plan/%s/delete/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_searchcase(self): location = '/plan/%s/searchcase/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_delcase(self): location = '/plan/%s/delcase/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_ordercase(self): location = '/plan/%s/ordercase/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_edit(self): location = '/plan/%s/edit/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_printable(self): location = '/plan/%s/printable/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_export(self): location = '/plan/%s/export/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_attachment(self): location = '/plan/%s/attachment/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes def test_plan_history(self): location = '/plan/%s/history/' % self.plan_id response = self.c.get(location) try: self.assertEquals(response.status_code, 200) except AssertionError: assert response.status_code in self.status_codes if __name__ == '__main__': 
unittest.main()
license:       gpl-2.0
hash:          3,678,989,214,991,336,400
line_mean:     33.015152
line_max:      73
alpha_frac:    0.608018
autogenerated: false

repo_name:     czhengsci/veidt
path:          veidt/utils/data_selection.py
copies:        1
size:          4503
content:
# coding: utf-8 # Copyright (c) Materials Virtual Lab # Distributed under the terms of the BSD License. from __future__ import division, print_function, unicode_literals, \ absolute_import import random import numpy as np import pandas as pd from copy import copy from pymatgen import Structure class MonteCarloSampler(object): """ Sample a subset from the dataset to achieve some criteria using simulated annealing. For example, one needs to subset the data so that a fraction of the data can already cover a large feature space, i.e., maximizing the distances. """ def __init__(self, datasets, num_samples, cost_function): """ Sample a subset with size num_samples from datasets to minimize the cost function. Args: datasets (numpy.array): The total datasets. num_samples (int): Number of samples from the data. cost_function (function): Function that takes into a subset of the data and calculate a cost. """ self.datasets = datasets self.num_samples = num_samples self.cost_function = cost_function self.num_total = len(datasets) self.num_remain = self.num_total - num_samples self.index_selected = list(np.random.choice( self.num_total, num_samples, replace=False)) self._get_remain_index() self.cost = self.compute_cost(self.datasets[self.index_selected, :]) self.accepted = 0 self.rejected = 0 self.cost_history = [] self.cost_history.append(self.cost) def _get_remain_index(self): self.index_remain = sorted(list(set(range(self.num_total)) - set(self.index_selected))) def compute_cost(self, data_subset): """ Compute the cost of data subsets. Args: data_subset (numpy.array): Data subset. """ return self.cost_function(data_subset) def sample(self, num_attempts, t_init, t_final): """ Metropolis sampler. For every sampling attempt, one data entry is swapped with the data reservior. Then the energy difference is evaluated. If dE < 0, the swapping is accepted. If dE > 0, then it is accepted with probability exp(-dE / T), where T is some artificial temperature. We can start with a relatively large T, and then reduce it with sampling process going on. Args: num_attempts (int): Number of sampling attempts. t_init (float): Initial temperature. t_final (float): Final temperature. """ temperatures = np.linspace(t_init, t_final, num_attempts) for i in range(num_attempts): temperature = temperatures[i] index = random.choice(self.index_selected) index_remain = random.choice(self.index_remain) self.update(index, index_remain, temperature) self.cost_history.append(self.cost) def update(self, index, index_remain, temperature): """ Implement the data swap, if it is accepted. Args: index (int): The index of selected feature matrix used for swapping. index_remain (int): The index of remaining feature matrix used for swapping. temperature (float): Artificial temperature. """ new_selected = copy(self.index_selected) new_selected.remove(index) new_selected.append(index_remain) cost_after_swap = self.compute_cost(self.datasets[new_selected, :]) d_cost = cost_after_swap - self.cost accept = self.decision(d_cost, temperature) if accept: self.index_selected = copy(new_selected) self._get_remain_index() self.cost = cost_after_swap else: pass def decision(self, d_cost, temperature): """ Decision on accepting the data swap. Args: d_cost (float): Difference between cost in proposed move. temperature (float): Temperature. """ if d_cost < 0: self.accepted += 1 return True else: p = np.exp(-d_cost / temperature) p2 = np.random.rand(1) if p2 < p: self.accepted += 1 return True else: self.rejected += 1 return False
license:       bsd-3-clause
hash:          -7,386,989,554,089,515,000
line_mean:     34.464567
line_max:      88
alpha_frac:    0.593826
autogenerated: false

repo_name:     davygeek/vitess
path:          test/vschema.py
copies:        1
size:          4090
content:
#!/usr/bin/env python # Copyright 2019 The Vitess Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests vschema creation and manipulation """ import unittest import environment import logging import tablet import utils import keyspace_util from vtdb import dbexceptions from vtdb import vtgate_cursor from vtdb import vtgate_client shard_0_master = None keyspace_env = None create_vt_user = '''create table vt_user ( id bigint, name varchar(64), primary key (id) ) Engine=InnoDB''' create_main = '''create table main ( id bigint, val varchar(128), primary key(id) ) Engine=InnoDB''' def setUpModule(): global keyspace_env global shard_0_master try: environment.topo_server().setup() keyspace_env = keyspace_util.TestEnv() keyspace_env.launch( 'user', ddls=[ create_vt_user, create_main, ] ) shard_0_master = keyspace_env.tablet_map['user.0.master'] utils.VtGate().start( tablets=[shard_0_master], extra_args=['-vschema_ddl_authorized_users','%'], ) utils.vtgate.wait_for_endpoints('user.0.master', 1) except: tearDownModule() raise def tearDownModule(): utils.required_teardown() if utils.options.skip_teardown: return if keyspace_env: keyspace_env.teardown() environment.topo_server().teardown() utils.kill_sub_processes() utils.remove_tmp_files() def get_connection(timeout=10.0): protocol, endpoint = utils.vtgate.rpc_endpoint(python=True) try: return vtgate_client.connect(protocol, endpoint, timeout) except Exception: logging.exception('Connection to vtgate (timeout=%s) failed.', timeout) raise class TestDDLVSchema(unittest.TestCase): decimal_type = 18 int_type = 265 string_type = 6165 varbinary_type = 10262 def _test_queries(self,cursor,count=4): for x in xrange(count): i = x+1 cursor.begin() cursor.execute( 'insert into vt_user (id,name) values (:id,:name)', {'id': i, 'name': 'test %s' % i}) cursor.commit() # Test select equal for x in xrange(count): i = x+1 cursor.execute('select id, name from vt_user where id = :id', {'id': i}) self.assertEqual( (cursor.fetchall(), cursor.rowcount, cursor.lastrowid,cursor.description), ([(i, 'test %s' % i)], 1L, 0,[('id', self.int_type), ('name', self.string_type)])) cursor.begin() cursor.execute( 'DELETE FROM vt_user', {} ) cursor.commit() def _read_vschema(self, cursor): # Test Showing Tables cursor.execute( 'SHOW VSCHEMA TABLES',{} ) self.assertEqual( [ x[0] for x in cursor.fetchall() ], [ 'dual', 'main', 'vt_user' ], ) # Test Showing Vindexes cursor.execute( 'SHOW VSCHEMA VINDEXES',{} ) self.assertEqual( [ x[0] for x in cursor.fetchall() ], [ ], ) def _create_vschema(self,cursor): cursor.begin() cursor.execute( 'ALTER VSCHEMA ADD TABLE vt_user',{} ) cursor.execute( 'ALTER VSCHEMA ADD TABLE main',{} ) cursor.commit() def test_unsharded_vschema(self): vtgate_conn = get_connection() cursor = vtgate_conn.cursor( tablet_type='master', keyspace=None, writable=True) # Test the blank database with no vschema self._test_queries(cursor) # Use the DDL to create an unsharded vschema and test again self._create_vschema(cursor) self._read_vschema(cursor) self._test_queries(cursor) if 
__name__ == '__main__': utils.main()
license:       apache-2.0
hash:          -5,718,953,077,722,687,000
line_mean:     23.491018
line_max:      92
alpha_frac:    0.645477
autogenerated: false

repo_name:     rolandgeider/wger
path:          wger/manager/migrations/0010_auto_20210102_1446.py
copies:        1
size:          1206
content:
# Generated by Django 3.1.3 on 2021-01-02 13:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('manager', '0009_auto_20201202_1559'), ] operations = [ migrations.AlterField( model_name='setting', name='rir', field=models.CharField(blank=True, choices=[(None, '------'), ('0', 0), ('0.5', 0.5), ('1', 1), ('1.5', 1.5), ('2', 2), ('2.5', 2.5), ('3', 3), ('3.5', 3.5), ('4', 4)], max_length=3, null=True, verbose_name='RiR'), ), migrations.AlterField( model_name='workoutlog', name='rir', field=models.CharField(blank=True, choices=[(None, '------'), ('0', 0), ('0.5', 0.5), ('1', 1), ('1.5', 1.5), ('2', 2), ('2.5', 2.5), ('3', 3), ('3.5', 3.5), ('4', 4)], max_length=3, null=True, verbose_name='RiR'), ), ]
license:       agpl-3.0
hash:          8,143,447,409,504,964,000
line_mean:     37.903226
line_max:      95
alpha_frac:    0.360697
autogenerated: false

repo_name:     vrsys/avangong
path:          examples/sound/openal/openal-test.py
copies:        1
size:          4058
content:
# -*- Mode:Python -*- ########################################################################## # # # This file is part of AVANGO. # # # # Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der # # angewandten Forschung (FhG), Munich, Germany. # # # # AVANGO is free software: you can redistribute it and/or modify # # it under the terms of the GNU Lesser General Public License as # # published by the Free Software Foundation, version 3. # # # # AVANGO is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU Lesser General Public # # License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. # # # ########################################################################## import avango.osg.viewer import avango.moving import sys import avango.sound import avango.sound.openal if len(sys.argv) != 2: print "Usage '" + sys.argv[0] + " <modelname>" sys.exit(1) soundtraverser = avango.sound.nodes.SoundTraverser() openal_renderer = avango.sound.openal.nodes.OpenALSoundRenderer() openal_renderer.Device.value = "" soundtraverser.Renderers.value = [openal_renderer] # set up scene graph obj = avango.osg.nodes.LoadFile(Filename=sys.argv[1]) obj_trans = avango.osg.nodes.MatrixTransform() obj_trans.Children.value = [obj] #set up sound stereosound = avango.sound.nodes.SoundSource() obj_trans.Children.value.append(stereosound) stereosound.URL.value = "oggfile.ogg" stereosound.Loop.value = False monosound = avango.sound.nodes.SoundSource() obj_trans.Children.value.append(monosound) monosound.URL.value = "applause_mono.ogg" monosound.Loop.value = False root_group = avango.osg.nodes.Group() root_group.Children.value = [obj_trans] # set up viewing window = avango.osg.viewer.nodes.GraphicsWindow() camera = avango.osg.viewer.nodes.Camera() camera.Window.value = window viewer = avango.osg.viewer.nodes.Viewer() viewer.MasterCamera.value = camera viewer.Scene.value = root_group # set up event handling events = avango.osg.viewer.nodes.EventFields(View = viewer) window.ToggleFullScreen.connect_from(events.KeyAltReturn) window.DragEvent.connect_from(events.DragEvent) window.MoveEvent.connect_from(events.MoveEvent) soundtraverser.RootNode.value = root_group soundtraverser.Traverse.value = True # set up trackball mover trackball = avango.moving.nodes.Trackball() trackball.Direction.connect_from(window.MousePositionNorm) trackball.RotateTrigger.connect_from(events.MouseButtons_OnlyLeft) trackball.ZoomTrigger.connect_from(events.MouseButtons_LeftAndRight) trackball.PanTrigger.connect_from(events.MouseButtons_OnlyRight) trackball.Matrix.value = camera.ViewerTransform.value trackball.CenterTransform.value = \ avango.osg.make_scale_mat(0.1, 0.1, 0.1) * \ avango.osg.make_trans_mat(0, 0, -0.6) camera.ViewerTransform.connect_from(trackball.Matrix) openal_renderer.ListenerPosition.connect_from(camera.ViewerTransform) # render a frame to update bounding spheres and scale model to fit in window viewer.frame() scale = 0.08 / obj.get_bounding_sphere().radius() obj_trans.Matrix.value = avango.osg.make_scale_mat(scale, scale, scale) viewer.frame() # run evaluation and render loop stereosound.Play.value = True monosound.Play.value = True viewer.frame() viewer.run()
license:       lgpl-3.0
hash:          -8,545,645,918,670,794,000
line_mean:     35.232143
line_max:      76
alpha_frac:    0.62691
autogenerated: false

repo_name:     fccoelho/jogos_vorazes
path:          estrategias/LeoRodrigues.py
copies:        1
size:          2061
content:
# -*- coding: utf8 -*- from .jogadores import Jogador class MeuJogador(Jogador): def percent(self,data,percentil): data = sorted(data) n = len(data) if n == 0: pass #print ("Lista vazia") else: return int(round(percentil*n+1)) def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores): """ Método principal que executa a cada rodada. você precisa criar uma lista de escolhas onde 'c' significa escolher caçar e 'd' representa descansar as decisãoes podem usar todas as informações disponíveis, por exemplo, as reputações dos outros jogadores. rodada: inteiro que é a rodada em que você está comida_atual: inteiro com a comida que você tem reputacao_atual: float representando sua reputação atual m: inteiro que é um limiarde cooperação/caçada desta rodada. reputacoes_dos_jogadores: lista de floats com as reputações dos outros jogadores """ reput_oder = sorted(reputacoes_dos_jogadores) if comida_atual > 50: if rodada < 10: #print(reput_oder) escolhas = ['c' if max(reput_oder) > reput_oder[self.percent(reput_oder,0.25)-1] and min(reput_oder) < reput_oder[self.percent(reput_oder,0.75)-1] else 'd' for i in reput_oder] return escolhas else: if float(sum(reput_oder))/float(len(reput_oder)) >= 0.55: escolhas = ['c' if max(reput_oder) > reput_oder[self.percent(reput_oder,0.20)-1] else 'd' for i in reputacoes_dos_jogadores] else: escolhas = ['c' if max(reput_oder) > reput_oder[self.percent(reput_oder,0.40)-1] and min(reput_oder) < reput_oder[self.percent(reput_oder,0.60)-1] else 'd' for i in reput_oder] return escolhas else: escolhas = ['d' for i in reput_oder] return escolhas
license:       mit
hash:          8,636,253,855,674,421,000
line_mean:     47.571429
line_max:      197
alpha_frac:    0.594608
autogenerated: false

repo_name:     dwadler/QGIS
path:          python/plugins/processing/algs/qgis/PointDistance.py
copies:        1
size:          13351
content:
# -*- coding: utf-8 -*- """ *************************************************************************** PointDistance.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os import math from qgis.PyQt.QtGui import QIcon from qgis.PyQt.QtCore import QVariant from qgis.core import (QgsApplication, QgsFeatureRequest, QgsField, QgsFields, QgsProject, QgsFeature, QgsGeometry, QgsDistanceArea, QgsFeatureSink, QgsProcessingParameterFeatureSource, QgsProcessing, QgsProcessingException, QgsProcessingParameterEnum, QgsProcessingParameterField, QgsProcessingParameterNumber, QgsProcessingParameterFeatureSink, QgsSpatialIndex, QgsWkbTypes) from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class PointDistance(QgisAlgorithm): INPUT = 'INPUT' INPUT_FIELD = 'INPUT_FIELD' TARGET = 'TARGET' TARGET_FIELD = 'TARGET_FIELD' MATRIX_TYPE = 'MATRIX_TYPE' NEAREST_POINTS = 'NEAREST_POINTS' OUTPUT = 'OUTPUT' def icon(self): return QgsApplication.getThemeIcon("/algorithms/mAlgorithmDistanceMatrix.svg") def svgIconPath(self): return QgsApplication.iconPath("/algorithms/mAlgorithmDistanceMatrix.svg") def group(self): return self.tr('Vector analysis') def groupId(self): return 'vectoranalysis' def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.mat_types = [self.tr('Linear (N*k x 3) distance matrix'), self.tr('Standard (N x T) distance matrix'), self.tr('Summary distance matrix (mean, std. 
dev., min, max)')] self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT, self.tr('Input point layer'), [QgsProcessing.TypeVectorPoint])) self.addParameter(QgsProcessingParameterField(self.INPUT_FIELD, self.tr('Input unique ID field'), parentLayerParameterName=self.INPUT, type=QgsProcessingParameterField.Any)) self.addParameter(QgsProcessingParameterFeatureSource(self.TARGET, self.tr('Target point layer'), [QgsProcessing.TypeVectorPoint])) self.addParameter(QgsProcessingParameterField(self.TARGET_FIELD, self.tr('Target unique ID field'), parentLayerParameterName=self.TARGET, type=QgsProcessingParameterField.Any)) self.addParameter(QgsProcessingParameterEnum(self.MATRIX_TYPE, self.tr('Output matrix type'), options=self.mat_types, defaultValue=0)) self.addParameter(QgsProcessingParameterNumber(self.NEAREST_POINTS, self.tr('Use only the nearest (k) target points'), type=QgsProcessingParameterNumber.Integer, minValue=0, defaultValue=0)) self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Distance matrix'), QgsProcessing.TypeVectorPoint)) def name(self): return 'distancematrix' def displayName(self): return self.tr('Distance matrix') def processAlgorithm(self, parameters, context, feedback): source = self.parameterAsSource(parameters, self.INPUT, context) if source is None: raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT)) source_field = self.parameterAsString(parameters, self.INPUT_FIELD, context) target_source = self.parameterAsSource(parameters, self.TARGET, context) if target_source is None: raise QgsProcessingException(self.invalidSourceError(parameters, self.TARGET)) target_field = self.parameterAsString(parameters, self.TARGET_FIELD, context) same_source_and_target = parameters[self.INPUT] == parameters[self.TARGET] matType = self.parameterAsEnum(parameters, self.MATRIX_TYPE, context) nPoints = self.parameterAsInt(parameters, self.NEAREST_POINTS, context) if nPoints < 1: nPoints = target_source.featureCount() if matType == 0: # Linear distance matrix return self.linearMatrix(parameters, context, source, source_field, target_source, target_field, same_source_and_target, matType, nPoints, feedback) elif matType == 1: # Standard distance matrix return self.regularMatrix(parameters, context, source, source_field, target_source, target_field, nPoints, feedback) elif matType == 2: # Summary distance matrix return self.linearMatrix(parameters, context, source, source_field, target_source, target_field, same_source_and_target, matType, nPoints, feedback) def linearMatrix(self, parameters, context, source, inField, target_source, targetField, same_source_and_target, matType, nPoints, feedback): if same_source_and_target: # need to fetch an extra point from the index, since the closest match will always be the same # as the input feature nPoints += 1 inIdx = source.fields().lookupField(inField) outIdx = target_source.fields().lookupField(targetField) fields = QgsFields() input_id_field = source.fields()[inIdx] input_id_field.setName('InputID') fields.append(input_id_field) if matType == 0: target_id_field = target_source.fields()[outIdx] target_id_field.setName('TargetID') fields.append(target_id_field) fields.append(QgsField('Distance', QVariant.Double)) else: fields.append(QgsField('MEAN', QVariant.Double)) fields.append(QgsField('STDDEV', QVariant.Double)) fields.append(QgsField('MIN', QVariant.Double)) fields.append(QgsField('MAX', QVariant.Double)) out_wkb = QgsWkbTypes.multiType(source.wkbType()) if matType == 0 else 
source.wkbType() (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context, fields, out_wkb, source.sourceCrs()) if sink is None: raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT)) index = QgsSpatialIndex(target_source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([]).setDestinationCrs(source.sourceCrs(), context.transformContext())), feedback) distArea = QgsDistanceArea() distArea.setSourceCrs(source.sourceCrs(), context.transformContext()) distArea.setEllipsoid(context.project().ellipsoid()) features = source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([inIdx])) total = 100.0 / source.featureCount() if source.featureCount() else 0 for current, inFeat in enumerate(features): if feedback.isCanceled(): break inGeom = inFeat.geometry() inID = str(inFeat[inIdx]) featList = index.nearestNeighbor(inGeom.asPoint(), nPoints) distList = [] vari = 0.0 request = QgsFeatureRequest().setFilterFids(featList).setSubsetOfAttributes([outIdx]).setDestinationCrs(source.sourceCrs(), context.transformContext()) for outFeat in target_source.getFeatures(request): if feedback.isCanceled(): break if same_source_and_target and inFeat.id() == outFeat.id(): continue outID = outFeat[outIdx] outGeom = outFeat.geometry() dist = distArea.measureLine(inGeom.asPoint(), outGeom.asPoint()) if matType == 0: out_feature = QgsFeature() out_geom = QgsGeometry.unaryUnion([inFeat.geometry(), outFeat.geometry()]) out_feature.setGeometry(out_geom) out_feature.setAttributes([inID, outID, dist]) sink.addFeature(out_feature, QgsFeatureSink.FastInsert) else: distList.append(float(dist)) if matType != 0: mean = sum(distList) / len(distList) for i in distList: vari += (i - mean) * (i - mean) vari = math.sqrt(vari / len(distList)) out_feature = QgsFeature() out_feature.setGeometry(inFeat.geometry()) out_feature.setAttributes([inID, mean, vari, min(distList), max(distList)]) sink.addFeature(out_feature, QgsFeatureSink.FastInsert) feedback.setProgress(int(current * total)) return {self.OUTPUT: dest_id} def regularMatrix(self, parameters, context, source, inField, target_source, targetField, nPoints, feedback): distArea = QgsDistanceArea() distArea.setSourceCrs(source.sourceCrs(), context.transformContext()) distArea.setEllipsoid(context.project().ellipsoid()) inIdx = source.fields().lookupField(inField) targetIdx = target_source.fields().lookupField(targetField) index = QgsSpatialIndex(target_source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([]).setDestinationCrs(source.sourceCrs(), context.transformContext())), feedback) first = True sink = None dest_id = None features = source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([inIdx])) total = 100.0 / source.featureCount() if source.featureCount() else 0 for current, inFeat in enumerate(features): if feedback.isCanceled(): break inGeom = inFeat.geometry() if first: featList = index.nearestNeighbor(inGeom.asPoint(), nPoints) first = False fields = QgsFields() input_id_field = source.fields()[inIdx] input_id_field.setName('ID') fields.append(input_id_field) for f in target_source.getFeatures(QgsFeatureRequest().setFilterFids(featList).setSubsetOfAttributes([targetIdx]).setDestinationCrs(source.sourceCrs(), context.transformContext())): fields.append(QgsField(str(f[targetField]), QVariant.Double)) (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context, fields, source.wkbType(), source.sourceCrs()) if sink is None: raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT)) data = 
[inFeat[inField]] for target in target_source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([]).setFilterFids(featList).setDestinationCrs(source.sourceCrs(), context.transformContext())): if feedback.isCanceled(): break outGeom = target.geometry() dist = distArea.measureLine(inGeom.asPoint(), outGeom.asPoint()) data.append(dist) out_feature = QgsFeature() out_feature.setGeometry(inGeom) out_feature.setAttributes(data) sink.addFeature(out_feature, QgsFeatureSink.FastInsert) feedback.setProgress(int(current * total)) return {self.OUTPUT: dest_id}
license:       gpl-2.0
hash:          4,954,009,398,125,861,000
line_mean:     46.176678
line_max:      197
alpha_frac:    0.565126
autogenerated: false

repo_name:     JohanComparat/pySU
path:          spm/bin_spiders/spiders_last_burst_vs_radius.py
copies:        1
size:          5578
content:
import astropy.cosmology as co aa=co.Planck15 import astropy.io.fits as fits import astropy.units as u from astropy.coordinates import angles #import AngularSeparation from astropy import coordinates as coord import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as p import numpy as n import os import sys import ClusterScalingRelations as clsr from scipy.interpolate import interp1d import StellarMass as sm smhmr = sm.StellarMass() scl = clsr.ClusterScalingRelations_Mantz2016() cat = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1.fits.gz'))[1].data spm = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1_spm.fits'))[1].data volume_rough = aa.comoving_volume(0.5)*2200.*n.pi/129600 volume = volume_rough.value # get cluster center # distance to center # rescale to r200c_deg # get the latest min(ages) of the ssp # compute SFR # now looks at individual galaxies # and gets the highest SFR for each galaxy # youngest age highest_sfrs = [] youngest_ages = [] sep_r200c = [] for cc in cat: center = coord.ICRS(ra=cc['RA_OPT']*u.degree, dec=cc['DEC_OPT']*u.degree) gal = (spm['CLUS_ID']==cc['CLUS_ID']) #all_members = coord.ICRS() #separations = center.separation(all_members)/(cc['R200C_DEG']*u.degree)).value for id_cc, (pla, mjd, fib) in enumerate(zip(cc['ALLPLATE'][:len(gal.nonzero()[0])], cc['ALLMJD'][:len(gal.nonzero()[0])], cc['ALLFIBERID'][:len(gal.nonzero()[0])])): sel = (gal) & (spm['PLATE']==pla) & (spm['MJD']==mjd) & (spm['FIBERID']==fib) if len(sel.nonzero()[0])>0 : n_cp = spm['Chabrier_MILES_nComponentsSSP'][sel].astype('int')[0] if n_cp > 0 : all_ages = n.array([ spm['Chabrier_MILES_age_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ]) all_masses = n.array([ spm['Chabrier_MILES_stellar_mass_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ]) sfr_inst = all_masses / all_ages youngest_ages.append(n.min(all_ages)) highest_sfrs.append(n.max(sfr_inst)) position = coord.ICRS(cc['ALLRA'][id_cc]*u.degree, cc['ALLDEC'][id_cc]*u.degree) sep_r200c.append( (center.separation(position)/(cc['R200C_DEG']*u.degree)).value ) highest_sfrs = n.array(highest_sfrs) youngest_ages = n.array(youngest_ages) sep_r200c = n.array(sep_r200c) p.figure(1, (5,5)) p.title('SPIDERS') p.plot(sep_r200c, highest_sfrs, 'r+') p.xlabel('r/r200c') p.ylabel('SFR [Msun/yr]') #p.xscale('log') p.yscale('log') p.xlim((0.08,1.5)) p.grid() p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR.png')) p.clf() dx = ( n.max(sep_r200c) - n.min(sep_r200c) ) /3. 
r_b = n.arange(n.min(sep_r200c), n.max(sep_r200c) + dx, dx) p.figure(1, (5,5)) for ii,bb in enumerate(r_b[:-1]): sub = (sep_r200c>bb)&(sep_r200c<r_b[ii+1]) p.hist(highest_sfrs[sub], label=str(n.round(bb,3))+"<"+str(n.round(r_b[ii+1],3)), cumulative=True, normed=True, histtype='step') p.ylabel('normed cumulative distribution') p.xlabel('SFR [Msun/yr]') p.xscale('log') p.ylim((-0.01, 1.01)) p.grid() p.legend(frameon=False, loc=0) p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR-histograms.png')) p.clf() p.figure(1, (5,5)) p.title('SPIDERS') p.plot(sep_r200c, youngest_ages, 'r+') p.xlabel('r/r200c') p.ylabel('age [yr]') p.xscale('log') p.yscale('log') p.xlim((0.1,5)) p.grid() p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-AGE.png')) p.clf() p.figure(1, (5,5)) p.title('SPIDERS DR14 galaxies') p.plot(spm['Z'], spm["Chabrier_MILES_stellar_mass"], 'b,', label='targets') p.plot(z, y, 'r,', label='cluster members') p.xlabel('redshift') p.ylabel('stellar mass [Msun]') #p.xscale('log') p.yscale('log') p.xlim((0,0.7)) p.ylim((1e9,1e12)) p.grid() p.legend(frameon=False, loc=0) p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'redshift-mass.png')) p.clf() logm2x = n.hstack((m2x)) bins=n.arange(-7, 0.5, 0.1) basis = (n.isnan(logm2x)==False)&(logm2x != -n.inf)&(logm2x != n.inf) arbitrary_factor =5. p.figure(1, (5,5)) ok = (basis)&(x>1e44) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44') ok = (basis)&(x>10**44.5) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44.5') ok = (basis)&(x>1e45) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>45') ok = (basis)&(m200c>10**14) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='M200c>14', ls='dashed') ok = (basis)&(m200c>10**15) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='M200c>15', ls='dashed') xs = n.arange(-7, 0.01, 0.01) logfsat= lambda logxi, a, b, logN0, exponent : n.log10( 10**logN0 * (10**logxi)**a)# * n.e**(-b*(10**logxi)**exponent)) p.plot(xs, logfsat(xs, -0.81, 5.81, -2.25, -2.54), label='-0.81') p.plot(xs, logfsat(xs, -0.18, 5.81, -1.2, -.54), label='-0.18') p.xlabel('log10(SMHMR(stellar mass) / HaloMass(Lx ray))') p.ylabel('histogram') #p.xscale('log') #p.yscale('log') p.ylim((-1.5, 0.5)) p.xlim((-4,0)) p.grid() p.legend(frameon=False, loc=0) p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'LX-mass-histogram.png')) p.clf()
license:       cc0-1.0
hash:          -8,053,327,241,965,627,000
line_mean:     32.202381
line_max:      166
alpha_frac:    0.661527
autogenerated: false

repo_name:     OpenDataPolicingNC/Traffic-Stops
path:          traffic_stops/base_views.py
copies:        1
size:          3014
content:
from django.core.exceptions import ImproperlyConfigured from django.shortcuts import redirect, Http404 from django.views.generic import DetailView, ListView, TemplateView from django.views.generic.edit import ProcessFormView, FormMixin from traffic_stops.utils import get_chunks from collections import defaultdict class Home(FormMixin, ProcessFormView, TemplateView): def get(self, request, *args, **kwargs): if request.GET: form = self.get_form_class()(request.GET) if form.is_valid(): success = self.get_success_url() return redirect(success, form.cleaned_data['agency'].pk) return super(Home, self).get(request, **kwargs) class AgencyList(FormMixin, ListView): def get_success_url(self, pk, **kwargs): success = super(AgencyList, self).get_success_url(self, **kwargs) return redirect(success, pk) def get(self, request, **kwargs): if request.GET: form = self.get_form_class()(request.GET) if form.is_valid(): return self.get_success_url(pk=form.cleaned_data['agency'].pk) return super(AgencyList, self).get(request, **kwargs) def get_context_data(self, **kwargs): context = super(AgencyList, self).get_context_data(**kwargs) # The following seems to be all ProcessFormView really gives us. # It causes collisions with ListView's get method. Hence # we just add it as a trivial context-modification snippet. form_class = self.get_form_class() form = self.get_form(form_class) context['form'] = form # Once we have the "letters present", we want to be able to iterate # over categorized, sorted lists of agencies. Therefore we create # a dict indexed by first letter. sorted_agencies = defaultdict(list) for agency in context['agency_list']: initial = agency.name[:1] sorted_agencies[initial].append(agency) for key in sorted_agencies: sorted_agencies[key].sort(key=lambda x: x.name.lower()) sorted_agencies[key] = get_chunks(sorted_agencies[key]) sorted_agencies = sorted(sorted_agencies.items()) context['sorted_agencies'] = sorted_agencies context['agency_form'] = form return context class AgencyDetail(DetailView): def get_stop_model(self): if self.stop_model: return self.stop_model else: raise ImproperlyConfigured("No stop model provided.") def get_context_data(self, **kwargs): context = super(AgencyDetail, self).get_context_data(**kwargs) agency = context['object'] officer_id = self.request.GET.get('officer_id') if officer_id: Stop = self.get_stop_model() if not Stop.objects.filter(agency=agency, officer_id=officer_id).exists(): raise Http404() context['officer_id'] = officer_id return context
license:       mit
hash:          6,739,473,892,686,787,000
line_mean:     36.675
line_max:      86
alpha_frac:    0.639681
autogenerated: false

repo_name:     alfredodeza/execnet
path:          execnet/gateway_bootstrap.py
copies:        1
size:          3067
content:
# -*- coding: utf-8 -*- """ code to initialize the remote side of a gateway once the io is created """ import inspect import os import execnet from execnet import gateway_base from execnet.gateway import Gateway importdir = os.path.dirname(os.path.dirname(execnet.__file__)) class HostNotFound(Exception): pass def bootstrap_import(io, spec): # only insert the importdir into the path if we must. This prevents # bugs where backports expect to be shadowed by the standard library on # newer versions of python but would instead shadow the standard library sendexec( io, "import sys", "if %r not in sys.path:" % importdir, " sys.path.insert(0, %r)" % importdir, "from execnet.gateway_base import serve, init_popen_io, get_execmodel", "sys.stdout.write('1')", "sys.stdout.flush()", "execmodel = get_execmodel(%r)" % spec.execmodel, "serve(init_popen_io(execmodel), id='%s-slave')" % spec.id, ) s = io.read(1) assert s == "1".encode("ascii"), repr(s) def bootstrap_exec(io, spec): try: sendexec( io, inspect.getsource(gateway_base), "execmodel = get_execmodel(%r)" % spec.execmodel, "io = init_popen_io(execmodel)", "io.write('1'.encode('ascii'))", "serve(io, id='%s-slave')" % spec.id, ) s = io.read(1) assert s == "1".encode("ascii") except EOFError: ret = io.wait() if ret == 255: raise HostNotFound(io.remoteaddress) def bootstrap_socket(io, id): # XXX: switch to spec from execnet.gateway_socket import SocketIO sendexec( io, inspect.getsource(gateway_base), "import socket", inspect.getsource(SocketIO), "try: execmodel", "except NameError:", " execmodel = get_execmodel('thread')", "io = SocketIO(clientsock, execmodel)", "io.write('1'.encode('ascii'))", "serve(io, id='%s-slave')" % id, ) s = io.read(1) assert s == "1".encode("ascii") def sendexec(io, *sources): source = "\n".join(sources) io.write((repr(source) + "\n").encode("ascii")) def fix_pid_for_jython_popen(gw): """ fix for jython 2.5.1 """ spec, io = gw.spec, gw._io if spec.popen and not spec.via: # XXX: handle the case of remote being jython # and not having the popen pid if io.popen.pid is None: io.popen.pid = gw.remote_exec( "import os; channel.send(os.getpid())" ).receive() def bootstrap(io, spec): if spec.popen: if spec.via or spec.python: bootstrap_exec(io, spec) else: bootstrap_import(io, spec) elif spec.ssh or spec.vagrant_ssh: bootstrap_exec(io, spec) elif spec.socket: bootstrap_socket(io, spec) else: raise ValueError("unknown gateway type, cant bootstrap") gw = Gateway(io, spec) fix_pid_for_jython_popen(gw) return gw
license:       mit
hash:          8,197,248,749,936,590,000
line_mean:     27.137615
line_max:      79
alpha_frac:    0.580698
autogenerated: false

repo_name:     CaptainDesAstres/Blender-Render-Manager
path:          TaskList/TaskLog/FrameLog.py
copies:        1
size:          1368
content:
#!/usr/bin/python3.4 # -*-coding:Utf-8 -* '''module to manage task Frame log''' import xml.etree.ElementTree as xmlMod import datetime from usefullFunctions import * class FrameLog: '''class to manage task frame log''' def __init__(self, xml = None, frame = None, date = None, computingTime = None): '''initialize task frame object''' if xml is None: self.defaultInit(frame, date, computingTime) else: self.fromXml(xml) def defaultInit(self, frame, date, computingTime): '''initialize Task frame log object''' self.frame = frame self.date = date self.computingTime = computingTime def fromXml(self, xml): '''initialize Task frame log object with saved log''' self.frame = int(xml.get('frame')) self.date = datetime.datetime.fromtimestamp(float(xml.get('date'))) self.computingTime = float(xml.get('computingTime')) def toXml(self): '''export task frame log into xml syntaxed string''' return '<frame frame="'+str(self.frame)\ +'" date="'+str(int(self.date.timestamp()))\ +'" computingTime="'+str(self.computingTime)+'" />' def print(self): '''A method to print task frame log''' print(' ╚═ '+columnLimit((str(self.frame)), 9, sep = '')\ +self.date.strftime('%d/%m/%Y at %H:%M')\ +' '+str(round(self.computingTime, 2)) )
license:       mit
hash:          1,593,360,718,712,065,800
line_mean:     19.358209
line_max:      69
alpha_frac:    0.629765
autogenerated: false

repo_name:     ctberthiaume/keggannot
path:          keggannot/annot.py
copies:        1
size:          16296
content:
import os, sys from decimal import Decimal import gzip from collections import OrderedDict import logging def blast_result_iterator(blast_file): _ = blast_file.readline() # burn header for line in blast_file: if not line.startswith("#"): yield BlastHit(line) class BlastHit(object): """ Parse ncbi blast tabular output, comments will be ignored. blastp produces the following columns: qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore """ col_sep = "\t" query_id_col = 0 subject_id_col = 1 evalue_col = 10 bitscore_col = 11 def __init__(self, line=None): self.line = line self.query_id = None self.subject_id = None self.evalue = None self.bitscore = None if self.line: cols = line.split(self.col_sep) self.query_id = cols[self.query_id_col].strip() self.subject_id = cols[self.subject_id_col].strip() self.evalue = Decimal(cols[self.evalue_col].strip()) self.bitscore = Decimal(cols[self.bitscore_col].strip()) def __repr__(self): vals = (self.__class__.__name__, self.query_id, self.subject_id, self.evalue, self.bitscore) return "%s(query_id=%s, subject_id=%s, evalue=%s, bitscore=%s)" % vals class Gene(object): def __init__(self, gene_id): self.id = gene_id self.best_hit = None # the best hit with KO was not used because it lacked a module # -m flag changed this result self.module_skip = False def add_blast_hit(self, hit): logging.debug("In %s, adding blast hit %s" % (self, hit)) if (not self.best_hit) or (hit.evalue < self.best_hit.evalue): logging.debug("New best hit") self.best_hit = hit def __repr__(self): vals = (self.__class__.__name__, self.id, self.best_hit, self.module_skip) return "%s(id=%s, best_hit=%s, module_skip=%s)" % vals class KEGGAnnotator(object): def __init__(self, ko_genes_list_file, ko_module_list_file, ko_pathway_list_file, ko_enzyme_list_file, ko_file, module_file, pathway_file, blast_file=None, require_module=False): # To be output, a hit must have a module definition. The first (best) # hit with a module definition is used for each query. 
if require_module: self.add_blast_hit = self.add_blast_hit_module else: self.add_blast_hit = self.add_blast_hit_ko # Load KO lookup files found in genes/ko/ko_*.list logging.info("Creating new %s object" % self.__class__.__name__) logging.info("Loading ko_genes.list file") self.ko2genes = self.dict_from_tabular_file(ko_genes_list_file, remove_key_prefix=True, remove_val_prefix=False) logging.info("Done loading ko_genes.list file, found %i KOs" % len(self.ko2genes)) logging.info("Loading ko_module.list file") self.ko2modules = self.dict_from_tabular_file(ko_module_list_file) logging.info("Done loading ko_module.list file, found %i KOs" % len(self.ko2modules)) logging.info("Loading ko_pathway.list file") self.ko2pathways = self.dict_from_tabular_file(ko_pathway_list_file) logging.info("Done loading ko_pathway.list file, found %i KOs" % len(self.ko2pathways)) logging.info("Loading ko_enzyme.list file") self.ko2enzymes = self.dict_from_tabular_file(ko_enzyme_list_file) logging.info("Done loading ko_enzyme.list file, found %i KOs" % len(self.ko2enzymes)) # Make genes to KO lookup logging.info("Creating gene to KOs lookup table") self.gene2kos = dict() for ko, gene_list in self.ko2genes.items(): for g in gene_list: klist = self.gene2kos.setdefault(g, []) klist.append(ko) logging.info("Done creating gene to KOs lookup table") # Load KO, module, pathway definition file logging.info("Loading ko file") self.kos = self.parse_ko_file(ko_file) logging.info("Done loading ko file") logging.info("Loading module file") self.modules = self.parse_module_file(module_file) logging.info("Done loading module file") logging.info("Loading pathway file") self.pathways = self.parse_pathway_file(pathway_file) logging.info("Done loading pathway file") # keep track of genes which had ko, useful to mark cases where -m changed # results. self.genes_with_ko = {} self.genes = {} if blast_file: logging.info("Adding new blast results file") self.add_blast_results(blast_file) logging.info("Done adding new blast results") def dict_from_tabular_file(self, f, key_column=0, value_column=1, remove_key_prefix=True, remove_val_prefix=True, comment_symbol=None): d = dict() max_column = max(key_column, value_column) line_num = 0 for line in f: line_num += 1 if comment_symbol and line[0] == comment_symbol: continue fields = line.split() if max_column > len(fields): logging.error("Highest column is greater than columns in line %i" % line_num) sys.exit(1) # Remove ko:, md:, etc. 
if required if remove_key_prefix: fields[key_column] = self.remove_prefix(fields[key_column]) if remove_val_prefix: fields[value_column] = self.remove_prefix(fields[value_column]) if fields[key_column] in d: #logging.debug("Duplicate key %s encountered" % fields[key_column]) d[fields[key_column]].append(fields[value_column]) else: d[fields[key_column]] = [fields[value_column]] return d def remove_prefix(self, kegg_id): """Remove ko:, md:, path:, ec: from ID""" return kegg_id.split(":")[1] def parse_ko_file(self, ko_file): """Parse genes/ko/ko file""" k = dict() cur_entry = "" cur_name = "" cur_def = "" line_no = 0 for line in ko_file: line_no += 1 if line.startswith("ENTRY"): cur_entry = line.split()[1].rstrip() elif line.startswith("NAME"): cur_name = line.split(None, 1)[1].rstrip() elif line.startswith("DEFINITION"): cur_def = line.split(None, 1)[1].rstrip() elif line.startswith("///"): if (not cur_entry) or (not cur_name): sys.stderr.write("Error parsing %s at %i\n" % (ko_file.name, line_no)) sys.exit(1) k[cur_entry] = {"name": cur_name, "def": cur_def} cur_entry = "" cur_name = "" cur_def = "" return k def parse_module_file(self, module_file): """Parse module/module file""" m = dict() cur_entry = "" cur_name = "" cur_class = "" line_no = 0 for line in module_file: line_no += 1 if line.startswith("ENTRY"): cur_entry = line.split()[1].rstrip() elif line.startswith("NAME"): cur_name = line.split(None, 1)[1].rstrip() elif line.startswith("CLASS"): cur_class = line.split(None, 1)[1].rstrip() elif line.startswith("///"): if (not cur_entry) or (not cur_name): sys.stderr.write("Error parsing %s at %i\n" % (module_file.name, line_no)) sys.exit(1) m[cur_entry] = {"name": cur_name, "class": cur_class} cur_entry = "" cur_name = "" cur_class = "" return m def parse_pathway_file(self, pathway_file): """Parse pathway/pathway file""" p = dict() cur_name = "" cur_desc = "" cur_class = "" cur_entry = "" line_no = 0 for line in pathway_file: line_no += 1 if line.startswith("ENTRY"): fields = line.split() if fields[1].startswith("k"): cur_entry = fields[1] elif cur_entry and line.startswith("NAME"): cur_name = line.split(None, 1)[1].rstrip() elif cur_entry and line.startswith("DESCRIPTION"): cur_desc = line.split(None, 1)[1].rstrip() elif cur_entry and line.startswith("CLASS"): cur_class = line.split(None, 1)[1].rstrip() elif line.startswith("///"): if cur_entry: if (not cur_name) and (not cur_desc) and (not cur_class): sys.stderr.write("Error parsing %s at %i\n" % (pathway_file.name, line_no)) sys.exit(1) p[cur_entry] = {"name": cur_name, "desc": cur_desc, "class": cur_class} cur_pathway = "" cur_name = "" cur_desc = "" cur_class = "" cur_entry = "" return p def add_blast_results(self, blast_file): cnt = 0 for hit in blast_result_iterator(blast_file): self.add_blast_hit(hit) cnt += 1 logging.info("Found %i blast results, %i passed filter" % (cnt, len(self.genes))) # Only add hit if it has KO annotation def add_blast_hit_ko(self, hit): logging.debug("Attempting to add hit: %s" % hit) qid = hit.query_id if (not qid in self.genes) and self.get_hit_ko_list(hit): self.genes[qid] = Gene(qid) self.genes[qid].add_blast_hit(hit) # Only add hit if it has module annotation def add_blast_hit_module(self, hit): logging.debug("Attempting to add hit: %s" % hit) qid = hit.query_id if (not qid in self.genes): kos = self.get_hit_ko_list(hit) if kos: if self.get_hit_module_list(kos): # This hit has module annotations so add it g = Gene(qid) self.genes[qid] = g if qid in self.genes_with_ko: g.module_skip = True # note that 
this results differs because of -m g.add_blast_hit(hit) # add the hit result to the Gene object else: # This hit didn't have module annotations, but had ko so add to # genes_with_ko dict self.genes_with_ko[qid] = True def get_hit_ko_list(self, hit): if not hit: return None try: return sorted(self.gene2kos[hit.subject_id]) except KeyError: logging.debug("KOs not found for gene %s" % hit.subject_id) return None def get_hit_module_list(self, kos): answer = set() if kos: for ko in kos: if ko in self.ko2modules: answer.update(self.ko2modules[ko]) if len(answer): return sorted(answer) else: if kos: logging.debug("Modules not found for KOs %s" % kos) return None def get_hit_pathway_list(self, kos): answer = set() if kos: for ko in kos: try: answer.update(self.ko2pathways[ko]) except KeyError: pass if len(answer): return sorted(answer) else: if kos: logging.debug("Pathways not found for KOs %s" % kos) return None def get_hit_enzyme_list(self, kos): answer = set() if kos: for ko in kos: try: answer.update(self.ko2enzymes[ko]) except KeyError: pass if len(answer): return sorted(answer) else: if kos: logging.debug("Enzymes not found for KOs %s" % kos) return None def make_basic_report_text(self): yield "\t".join(["query", "gene", "KO", "KO_names", "KO_descriptions", "modules", "module_names", "module_classes", "pathways", "pathway_names", "pathway_classes", "EC", "evalue", "score", "module_skip"]) annotations = self.get_gene_annotations() for gene_id, annot in annotations.iteritems(): yield self.tabify_annotations(gene_id, annot) def get_gene_annotations(self): out = dict() for gene_id, gene in self.genes.iteritems(): out[gene_id] = self.aggregate_hit_data(gene) return out def aggregate_hit_data(self, gene): hit = gene.best_hit out = {"gene": "", "kos": [], "ko_names": [], "ko_defs": [], "modules": [], "module_names": [], "module_classes": [], "pathways": [], "pathway_names": [], "pathway_classes": [], "enzymes": [], "evalue": "", "score": "", "module_skip": False } # Add hit gene if hit: out["gene"] = hit.subject_id out["evalue"] = hit.evalue out["score"] = hit.bitscore out["module_skip"] = gene.module_skip kos = self.get_hit_ko_list(hit) if len(kos) > 1: logging.info("More than one KO for %s" % gene.id) out.update(self.ko_annots(kos)) return out def ko_annots(self, kos): """Make a dictionary of KEGG annotations for a list of KOs""" out = {} if kos: out["kos"] = kos out["ko_names"] = [self.kos[k]["name"] for k in kos] out["ko_defs"] = [self.kos[k]["def"] for k in kos] # Build module list modules = self.get_hit_module_list(kos) if modules: out["modules"] = modules out["module_names"] = [self.modules[m]["name"] for m in modules] out["module_classes"] = [self.modules[m]["class"] for m in modules] # Build pathway list pathways = self.get_hit_pathway_list(kos) if pathways: out["pathways"] = pathways out["pathway_names"] = [self.pathways[p]["name"] for p in pathways] out["pathway_classes"] = [self.pathways[p]["class"] for p in pathways] # Build enzyme list enzymes = self.get_hit_enzyme_list(kos) if enzymes: out["enzymes"] = enzymes return out def tabify_annotations(self, gene_id, annot): """Create tab delimited string for hit data created by aggregate_hit_data""" out_text = gene_id + "\t" out_text += "\t".join([annot["gene"], self.tabify_ko_annotations(annot), str(annot["evalue"]), str(annot["score"]), str(annot["module_skip"])]) return out_text def tabify_ko_annotations(self, annot): """Tabify output from ko_annots""" return "\t".join(["; ".join(annot["kos"]), "; ".join(annot["ko_names"]), "; 
".join(annot["ko_defs"]), "; ".join(annot["modules"]), "; ".join(annot["module_names"]), " ::: ".join(annot["module_classes"]), "; ".join(annot["pathways"]), "; ".join(annot["pathway_names"]), " ::: ".join(annot["pathway_classes"]), "; ".join(annot["enzymes"])])
license:       apache-2.0
hash:          2,386,312,616,748,014,000
line_mean:     37.8
line_max:      100
alpha_frac:    0.510371
autogenerated: false

repo_name:     ryanpbrewster/SciVis-2015
path:          examples/sdf_example.py
copies:        1
size:          2689
content:
""" The Example is from http://darksky.slac.stanford.edu/scivis2015/examples.html """ from sdfpy import load_sdf from thingking import loadtxt prefix = "../data/" # Load N-body particles from a = 1.0 dataset. Particles have positions with # units of proper kpc, and velocities with units of km/s. particles = load_sdf(prefix+"ds14_scivis_0128_e4_dt04_1.0000") # Load the a=1 Rockstar hlist file. The header of the file lists the useful # units/information. scale, id, desc_scale, desc_id, num_prog, pid, upid, desc_pid, phantom, \ sam_mvir, mvir, rvir, rs, vrms, mmp, scale_of_last_MM, vmax, x, y, z, \ vx, vy, vz, Jx, Jy, Jz, Spin, Breadth_first_ID, Depth_first_ID, \ Tree_root_ID, Orig_halo_ID, Snap_num, Next_coprogenitor_depthfirst_ID, \ Last_progenitor_depthfirst_ID, Rs_Klypin, M_all, M200b, M200c, M500c, \ M2500c, Xoff, Voff, Spin_Bullock, b_to_a, c_to_a, A_x, A_y, A_z, \ b_to_a_500c, c_to_a_500c, A_x_500c, A_y_500c, A_z_500c, T_over_U, \ M_pe_Behroozi, M_pe_Diemer, Macc, Mpeak, Vacc, Vpeak, Halfmass_Scale, \ Acc_Rate_Inst, Acc_Rate_100Myr, Acc_Rate_Tdyn = \ loadtxt(prefix+"rockstar/hlists/hlist_1.00000.list", unpack=True) # Now we want to convert the proper kpc of the particle position to comoving # Mpc/h, a common unit used in computational cosmology in general, but # specifically is used as the output unit in the merger tree halo list loaded # in above. First we get the Hubble parameter, here stored as 'h_100' in the # SDF parameters. Then we load the simulation width, L0, which is also in # proper kpc. Finally we load the scale factor, a, which for this particular # snapshot is equal to 1 since we are loading the final snapshot from the # simulation. h_100 = particles.parameters['h_100'] width = particles.parameters['L0'] cosmo_a = particles.parameters['a'] kpc_to_Mpc = 1./1000 sl = slice(0,None) # Define a simple function to convert proper to comoving Mpc/h. convert_to_cMpc = lambda proper: (proper + width/2.) * h_100 * kpc_to_Mpc / cosmo_a # Plot all the particles, adding a bit of alpha so that we see the density of # points. import matplotlib.pylab as pl pl.figure(figsize=[10,10]) pl.scatter(convert_to_cMpc(particles['x'][sl]), convert_to_cMpc(particles['y'][sl]), color='b', s=1.0, alpha=0.05) # Plot all the halos in red. pl.scatter(x, y, color='r', alpha=0.1) # Add some labels pl.xlabel('x [cMpc/h]') pl.ylabel('y [cMpc/h]') pl.savefig("halos_and_particles.png", bbox_inches='tight') # Could now consider coloring halos by any of the various quantities above. # Perhaps mvir would be nice to show the virial Mass of the halo, or we could # scale the points by the virial radius, rvir.
license:       mit
hash:          9,184,363,794,019,417,000
line_mean:     43.081967
line_max:      83
alpha_frac:    0.706582
autogenerated: false

repo_name:     rhcarvalho/kombu
path:          kombu/tests/async/http/test_curl.py
copies:        1
size:          5102
content:
# -*- coding: utf-8 -*- from __future__ import absolute_import from kombu.async.http.curl import READ, WRITE, CurlClient from kombu.tests.case import ( HubCase, Mock, call, patch, case_requires, set_module_symbol, ) @case_requires('pycurl') class test_CurlClient(HubCase): class Client(CurlClient): Curl = Mock(name='Curl') def test_when_pycurl_missing(self): with set_module_symbol('kombu.async.http.curl', 'pycurl', None): with self.assertRaises(ImportError): self.Client() def test_max_clients_set(self): x = self.Client(max_clients=303) self.assertEqual(x.max_clients, 303) def test_init(self): with patch('kombu.async.http.curl.pycurl') as _pycurl: x = self.Client() self.assertIsNotNone(x._multi) self.assertIsNotNone(x._pending) self.assertIsNotNone(x._free_list) self.assertIsNotNone(x._fds) self.assertEqual( x._socket_action, x._multi.socket_action, ) self.assertEqual(len(x._curls), x.max_clients) self.assertTrue(x._timeout_check_tref) x._multi.setopt.assert_has_calls([ call(_pycurl.M_TIMERFUNCTION, x._set_timeout), call(_pycurl.M_SOCKETFUNCTION, x._handle_socket), ]) def test_close(self): with patch('kombu.async.http.curl.pycurl'): x = self.Client() x._timeout_check_tref = Mock(name='timeout_check_tref') x.close() x._timeout_check_tref.cancel.assert_called_with() for _curl in x._curls: _curl.close.assert_called_with() x._multi.close.assert_called_with() def test_add_request(self): with patch('kombu.async.http.curl.pycurl'): x = self.Client() x._process_queue = Mock(name='_process_queue') x._set_timeout = Mock(name='_set_timeout') request = Mock(name='request') x.add_request(request) self.assertIn(request, x._pending) x._process_queue.assert_called_with() x._set_timeout.assert_called_with(0) def test_handle_socket(self): with patch('kombu.async.http.curl.pycurl') as _pycurl: hub = Mock(name='hub') x = self.Client(hub) fd = Mock(name='fd1') # POLL_REMOVE x._fds[fd] = fd x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl) hub.remove.assert_called_with(fd) self.assertNotIn(fd, x._fds) x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl) # POLL_IN hub = x.hub = Mock(name='hub') fds = [fd, Mock(name='fd2'), Mock(name='fd3')] x._fds = {f: f for f in fds} x._handle_socket(_pycurl.POLL_IN, fd, x._multi, None, _pycurl) hub.remove.assert_has_calls([call(fd)]) hub.add_reader.assert_called_with(fd, x.on_readable, fd) self.assertEqual(x._fds[fd], READ) # POLL_OUT hub = x.hub = Mock(name='hub') x._handle_socket(_pycurl.POLL_OUT, fd, x._multi, None, _pycurl) hub.add_writer.assert_called_with(fd, x.on_writable, fd) self.assertEqual(x._fds[fd], WRITE) # POLL_INOUT hub = x.hub = Mock(name='hub') x._handle_socket(_pycurl.POLL_INOUT, fd, x._multi, None, _pycurl) hub.add_reader.assert_called_with(fd, x.on_readable, fd) hub.add_writer.assert_called_with(fd, x.on_writable, fd) self.assertEqual(x._fds[fd], READ | WRITE) # UNKNOWN EVENT hub = x.hub = Mock(name='hub') x._handle_socket(0xff3f, fd, x._multi, None, _pycurl) # FD NOT IN FDS hub = x.hub = Mock(name='hub') x._fds.clear() x._handle_socket(0xff3f, fd, x._multi, None, _pycurl) self.assertFalse(hub.remove.called) def test_set_timeout(self): x = self.Client() x._set_timeout(100) def test_timeout_check(self): with patch('kombu.async.http.curl.pycurl') as _pycurl: x = self.Client() x._process_pending_requests = Mock(name='process_pending') x._multi.socket_all.return_value = 333, 1 _pycurl.error = KeyError x._timeout_check(_pycurl=_pycurl) x._multi.socket_all.return_value = None x._multi.socket_all.side_effect = _pycurl.error(333) 
x._timeout_check(_pycurl=_pycurl) def test_on_readable_on_writeable(self): with patch('kombu.async.http.curl.pycurl') as _pycurl: x = self.Client() x._on_event = Mock(name='on_event') fd = Mock(name='fd') x.on_readable(fd, _pycurl=_pycurl) x._on_event.assert_called_with(fd, _pycurl.CSELECT_IN) x.on_writable(fd, _pycurl=_pycurl) x._on_event.assert_called_with(fd, _pycurl.CSELECT_OUT)
bsd-3-clause
-1,647,547,606,994,934,000
37.074627
78
0.555076
false
iut-ibk/DynaMind-UrbanSim
3rdparty/opus/src/urbansim/zone/home_access_to_employment_DDD.py
2
6081
# Opus/UrbanSim urban simulation software. # Copyright (C) 2005-2009 University of Washington # See opus_core/LICENSE from abstract_zone_access_variable import Abstract_Zone_Access_Variable class home_access_to_employment_DDD(Abstract_Zone_Access_Variable): """The accessibility for the households in the given zones to employment for number of autos DDD. For example, if the possibilities for number of cars per household are 0, 1, 2, or 3+ (3 or more), then DDD can be 0, 1, 2, or 3. Note that for this variable, the zone in which the household resides is the origin zone in the travel data. The value of this variable for zone i is defined as follows: home_access_to_employment(i) = sum over j (employment(j) * exp (logsum (ij))) where j ranges over all zones, employment(j) is the number of jobs in zone j, and logsum(ij) is the logsum from the travel model for travel from zone i to zone j. """ def __init__(self, ncars): Abstract_Zone_Access_Variable.__init__(self, ncars, "number_of_jobs") def access_is_from_origin(self): """the zone in which the household resides is the origin zone in the travel data for this variable""" return True from opus_core.tests import opus_unittest from urbansim.variable_test_toolbox import VariableTestToolbox from numpy import array from numpy import ma from math import exp class Tests(opus_unittest.OpusTestCase): # check the case num_autos = 2 variable_name = "urbansim.zone.home_access_to_employment_2" def test_my_inputs(self): # the zone table includes two zones (1 and 2) values = VariableTestToolbox().compute_variable(self.variable_name, {"zone":{ "zone_id":array([1,2]), "number_of_jobs":array([100,125])}, "travel_data":{ "from_zone_id":array([1,1,2,2]), "to_zone_id":array([1,2,1,2]), "logsum0":array([-4.8,-2.222,-3.1,-4]), "logsum1":array([-3,-2,-1,-3.5]), "logsum2":array([-1,-2,-3,-4]), "logsum3":array([-4,-3,-2,-1])}}, dataset = "zone") should_be = array([100*exp(-1)+125*exp(-2), 100*exp(-3)+125*exp(-4)]) self.assertEqual(ma.allclose(values, should_be, rtol=1e-4), True, msg = "Error in " + self.variable_name) def test_weird_inputs(self): # The zone ids don't need to start with 0 or 1. Also, they don't need to be # contiguous, and the zone id's in the travel data might not be in the same order. # The zone table is for two zones (listed here in reverse order [14,11]). # Also, in the travel data below, note that the to_zone_ids are listed in a weird order ... 
# this should still work values = VariableTestToolbox().compute_variable(self.variable_name, {"zone":{ "zone_id":array([14,11]), "number_of_jobs":array([125,100])}, "travel_data":{ "from_zone_id":array([11,11,14,14]), "to_zone_id":array([11,14,14,11]), "logsum0":array([-4.8,-2.222,-4,-3.1]), "logsum1":array([-3,-2,-3.5,-1]), "logsum2":array([-1,-2,-4,-3]), "logsum3":array([-4,-3,-1,-2])}}, dataset = "zone") should_be = array([125*exp(-4)+100*exp(-3), 125*exp(-2)+100*exp(-1)]) self.assertEqual(ma.allclose(values, should_be, rtol=1e-4), True, msg = "Error in " + self.variable_name) def test_3weird_inputs(self): # similar to the weird_inputs test, but with 3 zones in the zone table (listed in a random order) values = VariableTestToolbox().compute_variable(self.variable_name, {"zone":{ "zone_id":array([14,11,99]), "number_of_jobs":array([125,100,333])}, "travel_data":{ "from_zone_id":array([11,11,99,14,99,11,99,14,14]), "to_zone_id": array([99,14,99,99,14,11,11,14,11]), "logsum0":array([-4.8,-2.222,-4,-3.1,-1.11,-1.5,-2.8,-1.99,-2.99]), "logsum1":array([-3,-2,-3.5,-1,-1,-1,-1,-1,-1]), "logsum2":array([-1,-2,-3,-4,-5,-6,-7,-8,-9]), "logsum3":array([-4,-3,-2,-1,-1,-2,-3,-4,-5])}}, dataset = "zone") should_be = array([125*exp(-8)+100*exp(-9)+333*exp(-4), 125*exp(-2)+100*exp(-6)+333*exp(-1), 125*exp(-5)+100*exp(-7)+333*exp(-3)]) self.assertEqual(ma.allclose(values, should_be, rtol=1e-4), True, msg = "Error in " + self.variable_name) def test_extra_zones(self): # Similar to test_my_inputs, but also with some additional # travel data that is not used. (It is ok for there to be additional zones # in the travel data that aren't in the zone table. In this case # zone 888 in the travel data is extra.) # # The zone table itself just includes two zones (1 and 2). values = VariableTestToolbox().compute_variable(self.variable_name, {"zone":{ "zone_id":array([1,2]), "number_of_jobs":array([100,125])}, "travel_data":{ "from_zone_id":array([1,1,2,2,888,888,1,2]), "to_zone_id":array ([1,2,1,2,1,2,888,888]), "logsum0":array([-4.8,-2.222,-3.1,-4,-100,-200,-300,-400]), "logsum1":array([-3,-2,-1,-3.5,-100,-200,-300,-400]), "logsum2":array([-1,-2,-3,-4,-100,-200,-300,-400]), "logsum3":array([-4,-3,-2,-1,-100,-200,-300,-400])}}, dataset = "zone") should_be = array([100*exp(-1)+125*exp(-2), 100*exp(-3)+125*exp(-4)]) self.assertEqual(ma.allclose(values, should_be, rtol=1e-4), True, msg = "Error in " + self.variable_name) if __name__=='__main__': opus_unittest.main()
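A standalone arithmetic sketch of the accessibility formula from the class docstring, using the same two-zone numbers as test_my_inputs; it relies only on numpy and is independent of the Opus test toolbox.

import numpy as np

jobs = np.array([100.0, 125.0])                # employment(j) for zones 1 and 2
logsum2 = np.array([[-1.0, -2.0],              # logsum(ij) for num_autos=2, origin zone 1
                    [-3.0, -4.0]])             # origin zone 2
access = (jobs * np.exp(logsum2)).sum(axis=1)
# access[0] = 100*exp(-1) + 125*exp(-2) and access[1] = 100*exp(-3) + 125*exp(-4),
# matching the should_be values in test_my_inputs.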
gpl-2.0
208,771,490,386,531,070
50.439655
138
0.549416
false
Azure/azure-sdk-for-python
sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/application_create_parameters.py
1
8780
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .application_base import ApplicationBase class ApplicationCreateParameters(ApplicationBase): """Request parameters for creating a new application. All required parameters must be populated in order to send to Azure. :param allow_guests_sign_in: A property on the application to indicate if the application accepts other IDPs or not or partially accepts. :type allow_guests_sign_in: bool :param allow_passthrough_users: Indicates that the application supports pass through users who have no presence in the resource tenant. :type allow_passthrough_users: bool :param app_logo_url: The url for the application logo image stored in a CDN. :type app_logo_url: str :param app_roles: The collection of application roles that an application may declare. These roles can be assigned to users, groups or service principals. :type app_roles: list[~azure.graphrbac.models.AppRole] :param app_permissions: The application permissions. :type app_permissions: list[str] :param available_to_other_tenants: Whether the application is available to other tenants. :type available_to_other_tenants: bool :param error_url: A URL provided by the author of the application to report errors when using the application. :type error_url: str :param group_membership_claims: Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. :type group_membership_claims: object :param homepage: The home page of the application. :type homepage: str :param informational_urls: URLs with more information about the application. :type informational_urls: ~azure.graphrbac.models.InformationalUrl :param is_device_only_auth_supported: Specifies whether this application supports device authentication without a user. The default is false. :type is_device_only_auth_supported: bool :param key_credentials: A collection of KeyCredential objects. :type key_credentials: list[~azure.graphrbac.models.KeyCredential] :param known_client_applications: Client applications that are tied to this resource application. Consent to any of the known client applications will result in implicit consent to the resource application through a combined consent dialog (showing the OAuth permission scopes required by the client and the resource). :type known_client_applications: list[str] :param logout_url: the url of the logout page :type logout_url: str :param oauth2_allow_implicit_flow: Whether to allow implicit grant flow for OAuth2 :type oauth2_allow_implicit_flow: bool :param oauth2_allow_url_path_matching: Specifies whether during a token Request Azure AD will allow path matching of the redirect URI against the applications collection of replyURLs. The default is false. :type oauth2_allow_url_path_matching: bool :param oauth2_permissions: The collection of OAuth 2.0 permission scopes that the web API (resource) application exposes to client applications. These permission scopes may be granted to client applications during consent. 
:type oauth2_permissions: list[~azure.graphrbac.models.OAuth2Permission] :param oauth2_require_post_response: Specifies whether, as part of OAuth 2.0 token requests, Azure AD will allow POST requests, as opposed to GET requests. The default is false, which specifies that only GET requests will be allowed. :type oauth2_require_post_response: bool :param org_restrictions: A list of tenants allowed to access application. :type org_restrictions: list[str] :param optional_claims: :type optional_claims: ~azure.graphrbac.models.OptionalClaims :param password_credentials: A collection of PasswordCredential objects :type password_credentials: list[~azure.graphrbac.models.PasswordCredential] :param pre_authorized_applications: list of pre-authorized applications. :type pre_authorized_applications: list[~azure.graphrbac.models.PreAuthorizedApplication] :param public_client: Specifies whether this application is a public client (such as an installed application running on a mobile device). Default is false. :type public_client: bool :param publisher_domain: Reliable domain which can be used to identify an application. :type publisher_domain: str :param reply_urls: A collection of reply URLs for the application. :type reply_urls: list[str] :param required_resource_access: Specifies resources that this application requires access to and the set of OAuth permission scopes and application roles that it needs under each of those resources. This pre-configuration of required resource access drives the consent experience. :type required_resource_access: list[~azure.graphrbac.models.RequiredResourceAccess] :param saml_metadata_url: The URL to the SAML metadata for the application. :type saml_metadata_url: str :param sign_in_audience: Audience for signing in to the application (AzureADMyOrganization, AzureADAllOrganizations, AzureADAndMicrosoftAccounts). :type sign_in_audience: str :param www_homepage: The primary Web page. :type www_homepage: str :param display_name: Required. The display name of the application. :type display_name: str :param identifier_uris: A collection of URIs for the application. 
:type identifier_uris: list[str] """ _validation = { 'display_name': {'required': True}, } _attribute_map = { 'allow_guests_sign_in': {'key': 'allowGuestsSignIn', 'type': 'bool'}, 'allow_passthrough_users': {'key': 'allowPassthroughUsers', 'type': 'bool'}, 'app_logo_url': {'key': 'appLogoUrl', 'type': 'str'}, 'app_roles': {'key': 'appRoles', 'type': '[AppRole]'}, 'app_permissions': {'key': 'appPermissions', 'type': '[str]'}, 'available_to_other_tenants': {'key': 'availableToOtherTenants', 'type': 'bool'}, 'error_url': {'key': 'errorUrl', 'type': 'str'}, 'group_membership_claims': {'key': 'groupMembershipClaims', 'type': 'object'}, 'homepage': {'key': 'homepage', 'type': 'str'}, 'informational_urls': {'key': 'informationalUrls', 'type': 'InformationalUrl'}, 'is_device_only_auth_supported': {'key': 'isDeviceOnlyAuthSupported', 'type': 'bool'}, 'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'}, 'known_client_applications': {'key': 'knownClientApplications', 'type': '[str]'}, 'logout_url': {'key': 'logoutUrl', 'type': 'str'}, 'oauth2_allow_implicit_flow': {'key': 'oauth2AllowImplicitFlow', 'type': 'bool'}, 'oauth2_allow_url_path_matching': {'key': 'oauth2AllowUrlPathMatching', 'type': 'bool'}, 'oauth2_permissions': {'key': 'oauth2Permissions', 'type': '[OAuth2Permission]'}, 'oauth2_require_post_response': {'key': 'oauth2RequirePostResponse', 'type': 'bool'}, 'org_restrictions': {'key': 'orgRestrictions', 'type': '[str]'}, 'optional_claims': {'key': 'optionalClaims', 'type': 'OptionalClaims'}, 'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'}, 'pre_authorized_applications': {'key': 'preAuthorizedApplications', 'type': '[PreAuthorizedApplication]'}, 'public_client': {'key': 'publicClient', 'type': 'bool'}, 'publisher_domain': {'key': 'publisherDomain', 'type': 'str'}, 'reply_urls': {'key': 'replyUrls', 'type': '[str]'}, 'required_resource_access': {'key': 'requiredResourceAccess', 'type': '[RequiredResourceAccess]'}, 'saml_metadata_url': {'key': 'samlMetadataUrl', 'type': 'str'}, 'sign_in_audience': {'key': 'signInAudience', 'type': 'str'}, 'www_homepage': {'key': 'wwwHomepage', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'identifier_uris': {'key': 'identifierUris', 'type': '[str]'}, } def __init__(self, **kwargs): super(ApplicationCreateParameters, self).__init__(**kwargs) self.display_name = kwargs.get('display_name', None) self.identifier_uris = kwargs.get('identifier_uris', None)
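A minimal construction sketch for this model, assuming the models package re-exports the class as is standard for this SDK; the display name and identifier URI are hypothetical placeholders.

from azure.graphrbac.models import ApplicationCreateParameters

params = ApplicationCreateParameters(
    display_name='example-app',                        # the only required field
    identifier_uris=['https://contoso.example/app'],   # hypothetical URI
    available_to_other_tenants=False,
)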
mit
-113,552,795,501,562,910
53.875
114
0.685877
false
diamondman/pys3streamer
s3streamer/streamer.py
1
4416
from __future__ import unicode_literals
from __future__ import print_function

from boto.s3.connection import S3Connection


class S3Streamer(object):
    #def __init__(self, bucket_name, *key_names, s3_connection=None, key_is_prefix=False):
    def __init__(self, bucket_name, *key_names, **kwargs):
        """Create a new S3Streamer. Automatically creates a Boto S3Connection if one is not provided.

        Args:
            bucket_name: String name of the Amazon S3 bucket. All keys read will be read from this bucket.
            key_names: List of key names to cat together and read, or list of key prefixes.
            s3_connection (optional): A Boto S3Connection object. One will be created from your env settings if not provided
            key_is_prefix (optional): Boolean determining if key_names should be interpreted as key prefixes (True means interpret as key_prefix). Note no deduplication is done for keys when multiple key prefixes are used. It is possible to get the same key multiple times if it matches multiple prefixes.
        """
        if not len(key_names):
            raise ValueError("At least one key name must be provided")
        self._key_strs = key_names

        s3_connection = kwargs.pop('s3_connection', None)
        key_is_prefix = kwargs.pop('key_is_prefix', False)
        if kwargs.keys():
            raise TypeError("unexpected keyword argument %s" % list(kwargs.keys())[0])

        self._conn = s3_connection or S3Connection()
        self._bucket_name = bucket_name
        self._bucket = None
        self._current_key_str = iter(self._key_strs)
        self._key_is_prefix = key_is_prefix
        self._tmp_iter = None
        self._cur_key = None
        self._readline_buff = b''
        self._key_names_accessed = []
        self._read_buffer_size = 1*1024*1024
        self._hit_eof = False

    @property
    def bucket(self):
        """Get the Amazon S3 boto bucket object."""
        if not self._bucket:
            self._bucket = self._conn.get_bucket(self._bucket_name)
        return self._bucket

    def __repr__(self):
        return "<%s: %s>"%(self.__class__.__name__, self._bucket_name)

    @property
    def keys_read(self):
        """List the Amazon S3 keys that have been read so far"""
        return list(self._key_names_accessed)

    @property
    def _next_key(self):
        if self._key_is_prefix:
            if not self._tmp_iter:
                try:
                    k_str = next(self._current_key_str)
                    self._tmp_iter = iter(self.bucket.list(prefix=k_str))
                except StopIteration as e:
                    return None
            try:
                k = next(self._tmp_iter)
                self._key_names_accessed.append(k.name)
                return k
            except StopIteration as e:
                self._tmp_iter = None
                return self._next_key
        else:
            try:
                k_str = next(self._current_key_str)
            except StopIteration as e:
                return None
            return self.bucket.get_key(k_str) or self._next_key

    def _select_next_key(self):
        self._cur_key = self._next_key
        self._hit_eof = False
        return self._cur_key

    @property
    def _current_key(self):
        if not self._cur_key:
            self._select_next_key()
        return self._cur_key

    def read(self, size):
        if size == 0:
            raise ValueError("size 0 unsupported because it is dangerous.")
        d = self._current_key.read(size)
        if len(d) != size and not self._hit_eof:
            d2 = self._current_key.read(size-len(d))
            if not d2: #HIT EOF
                self._hit_eof = True
                d += b'\n'
                return d
            d += d2
        if d:
            return d
        if not self._select_next_key():
            return b''
        return self.read(size)

    def readline(self):
        while b'\n' not in self._readline_buff:
            d = self.read(self._read_buffer_size)
            if not d:
                return b''
            self._readline_buff += d
        line, _, self._readline_buff = self._readline_buff.partition(b'\n')
        return line+b'\n'

    def __iter__(self):
        while True:
            d = self.readline()
            if not d:
                break
            yield d

    def close(self):
        pass
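A brief usage sketch for the class above, assuming boto can find AWS credentials in the environment; the bucket and key names are hypothetical.

from s3streamer.streamer import S3Streamer

stream = S3Streamer('example-bucket', 'logs/2016-01-01.log', 'logs/2016-01-02.log')
for line in stream:        # the two objects are concatenated and yielded as b'\n'-terminated lines
    print(line)
stream.close()

# Prefix mode instead reads every key that starts with the given prefix:
# stream = S3Streamer('example-bucket', 'logs/2016-01-', key_is_prefix=True)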
mit
-8,990,907,671,535,923,000
34.328
305
0.55933
false
Duroktar/cookbook
AIML Files/ALICE/ALICE_bot.py
1
1830
# - LINKS - ALICE Bot # # By: traBpUkciP (2016) import aiml # AI-Markup Language library import datetime import time import urllib # library for dealing with web stuff through Python import sys, os path = os.path.dirname(os.path.abspath(sys.argv[0])) BRAIN_FILE = path + "/bot_brain_ALICE.brn" k = aiml.Kernel() k.bootstrap(brainFile=BRAIN_FILE) def talk(text): # THIS URL NEEDS TO BE SET TO YOUR PORT AND KEY ACCORDINGLY # THIS PART ONLY WORK IF YOU HAVE LINKS WEB REQUEST SETTINGS ON DEFAULT url = 'http://127.0.0.1:54657/?action=[Speak("placeholder")]&key=1234ABC' #set default talk to jarvis address newurl = url.replace("placeholder", text) #fill in text to be spoken urllib.urlopen(newurl) def userInput(): f = open( path + "/dictation.txt") a = f.read() f.close() if a == "False": return False else: print "---------------------------------------------------------------------------------" print "Input from LINKS detected!" aliceSpeak(a) def clearInput(): f = open( path + '/dictation.txt', 'w') f.write("False") f.close() def writeHistory(i): f = open( path + '/history.txt', 'a') ts = time.time() st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') a = st + ": " + i print a print "---------------------------------------------------------------------------------" f.write(a) f.write('\n') f.close() def aliceSpeak(feed): message = feed writeHistory(message) clearInput() bot_response = k.respond(message) talk(bot_response) main() def main(): while userInput() == False: time.sleep(.1) print "working" continue try: while True: main() except KeyboardInterrupt: pass
gpl-3.0
-182,710,782,391,264,220
18.0625
114
0.553005
false
opadron/girder
girder/api/v1/resource.py
1
21286
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### import cherrypy import json from ..describe import Description, describeRoute from ..rest import Resource as BaseResource, RestException from girder.constants import AccessType from girder.api import access from girder.models.model_base import AccessControlledModel from girder.utility import acl_mixin from girder.utility import ziputil from girder.utility.progress import ProgressContext # Plugins can modify this set to allow other types to be searched allowedSearchTypes = {'collection', 'folder', 'group', 'item', 'user'} class Resource(BaseResource): """ API Endpoints that deal with operations across multiple resource types. """ def __init__(self): super(Resource, self).__init__() self.resourceName = 'resource' self.route('GET', ('search',), self.search) self.route('GET', ('lookup',), self.lookup) self.route('GET', (':id',), self.getResource) self.route('GET', ('download',), self.download) self.route('POST', ('download',), self.download) self.route('PUT', ('move',), self.moveResources) self.route('POST', ('copy',), self.copyResources) self.route('DELETE', (), self.delete) @access.public @describeRoute( Description('Search for resources in the system.') .param('q', 'The search query.') .param('mode', 'The search mode. Can use either a text search or a ' 'prefix-based search.', enum=('text', 'prefix'), required=False, default='text') .param('types', 'A JSON list of resource types to search for, e.g. ' "'user', 'folder', 'item'.") .param('level', 'Minimum required access level.', required=False, dataType='int', default=AccessType.READ) .pagingParams(defaultSort=None, defaultLimit=10) .errorResponse('Invalid type list format.') ) def search(self, params): self.requireParams(('q', 'types'), params) mode = params.get('mode', 'text') level = AccessType.validate(params.get('level', AccessType.READ)) user = self.getCurrentUser() limit = int(params.get('limit', 10)) offset = int(params.get('offset', 0)) if mode == 'text': method = 'textSearch' elif mode == 'prefix': method = 'prefixSearch' else: raise RestException( 'The search mode must be either "text" or "prefix".') try: types = json.loads(params['types']) except ValueError: raise RestException('The types parameter must be JSON.') results = {} for modelName in types: if modelName not in allowedSearchTypes: continue if '.' in modelName: name, plugin = modelName.rsplit('.', 1) model = self.model(name, plugin) else: model = self.model(modelName) results[modelName] = [ model.filter(d, user) for d in getattr(model, method)( query=params['q'], user=user, limit=limit, offset=offset, level=level) ] return results def _validateResourceSet(self, params, allowedModels=None): """ Validate a JSON string listing resources. 
The resources parameter is a JSON encoded dictionary with each key a model name and each value a list of ids that must be present in that model. :param params: a dictionary of parameters that must include 'resources' :param allowedModels: if present, an iterable of models that may be included in the resources. :returns: the JSON decoded resource dictionary. """ self.requireParams(('resources', ), params) try: resources = json.loads(params['resources']) except ValueError: raise RestException('The resources parameter must be JSON.') if type(resources) is not dict: raise RestException('Invalid resources format.') if allowedModels: for key in resources: if key not in allowedModels: raise RestException('Resource type not supported.') count = sum([len(resources[key]) for key in resources]) if not count: raise RestException('No resources specified.') return resources def _getResourceModel(self, kind, funcName=None): """ Load and return a model with a specific function or throw an exception. :param kind: the name of the model to load :param funcName: a function name to ensure that each model contains. :returns: the loaded model. """ try: model = self.model(kind) except ImportError: model = None if not model or (funcName and not hasattr(model, funcName)): raise RestException('Invalid resources format.') return model def _lookUpToken(self, token, parentType, parent): """ Find a particular child resource by name or throw an exception. :param token: the name of the child resource to find :param parentType: the type of the parent to search :param parent: the parent resource :returns: the child resource """ seekFolder = (parentType in ('user', 'collection', 'folder')) seekItem = (parentType == 'folder') seekFile = (parentType == 'item') # (model name, mask, search filter) searchTable = ( ('folder', seekFolder, {'name': token, 'parentId': parent['_id'], 'parentCollection': parentType}), ('item', seekItem, {'name': token, 'folderId': parent['_id']}), ('file', seekFile, {'name': token, 'itemId': parent['_id']}), ) for candidateModel, mask, filterObject in searchTable: if not mask: continue candidateChild = self.model(candidateModel).findOne(filterObject) if candidateChild is not None: return candidateChild, candidateModel # if no folder, item, or file matches, give up raise RestException('Child resource not found: %s(%s)->%s' % ( parentType, parent.get('name', parent.get('_id')), token)) def _lookUpPath(self, path, user): pathArray = [token for token in path.split('/') if token] model = pathArray[0] parent = None if model == 'user': username = pathArray[1] parent = self.model('user').findOne({'login': username}) if parent is None: raise RestException('User not found: %s' % username) elif model == 'collection': collectionName = pathArray[1] parent = self.model('collection').findOne({'name': collectionName}) if parent is None: raise RestException( 'Collection not found: %s' % collectionName) else: raise RestException('Invalid path format') try: document = parent self.model(model).requireAccess(document, user) for token in pathArray[2:]: document, model = self._lookUpToken(token, model, document) self.model(model).requireAccess(document, user) except RestException: raise RestException('Path not found: %s' % path) result = self.model(model).filter(document, user) return result @access.public def lookup(self, params): self.requireParams('path', params) return self._lookUpPath(params['path'], self.getCurrentUser()) lookup.description = ( Description('Look up a resource in the data hierarchy by path.') 
.param('path', 'The path of the resource. The path must be an absolute Unix ' 'path starting with either "/user/[user name]", for a user\'s ' 'resources or "/collection/[collection name]", for resources ' 'under a collection.') .errorResponse('Path is invalid.') .errorResponse('Path refers to a resource that does not exist.') .errorResponse('Read access was denied for the resource.', 403)) @access.cookie(force=True) @access.public def download(self, params): """ Returns a generator function that will be used to stream out a zip file containing the listed resource's contents, filtered by permissions. """ user = self.getCurrentUser() resources = self._validateResourceSet(params) # Check that all the resources are valid, so we don't download the zip # file if it would throw an error. for kind in resources: model = self._getResourceModel(kind, 'fileList') for id in resources[kind]: if not model.load(id=id, user=user, level=AccessType.READ): raise RestException('Resource %s %s not found.' % (kind, id)) metadata = self.boolParam('includeMetadata', params, default=False) cherrypy.response.headers['Content-Type'] = 'application/zip' cherrypy.response.headers['Content-Disposition'] = \ 'attachment; filename="Resources.zip"' def stream(): zip = ziputil.ZipGenerator() for kind in resources: model = self.model(kind) for id in resources[kind]: doc = model.load(id=id, user=user, level=AccessType.READ) for (path, file) in model.fileList( doc=doc, user=user, includeMetadata=metadata, subpath=True): for data in zip.addFile(file, path): yield data yield zip.footer() return stream download.description = ( Description('Download a set of items, folders, collections, and users ' 'as a zip archive.') .notes('This route is also exposed via the POST method because the ' 'request parameters can be quite long, and encoding them in the ' 'URL (as is standard when using the GET method) can cause the ' 'URL to become too long, which causes errors.') .param('resources', 'A JSON-encoded list of types to download. Each ' 'type is a list of ids. For example: {"item": [(item id 1), ' '(item id 2)], "folder": [(folder id 1)]}.') .param('includeMetadata', 'Include any metadata in JSON files in the ' 'archive.', required=False, dataType='boolean', default=False) .errorResponse('Unsupport or unknown resource type.') .errorResponse('Invalid resources format.') .errorResponse('No resources specified.') .errorResponse('Resource not found.') .errorResponse('Read access was denied for a resource.', 403)) @access.user def delete(self, params): """ Delete a set of resources. """ user = self.getCurrentUser() resources = self._validateResourceSet(params) total = sum([len(resources[key]) for key in resources]) progress = self.boolParam('progress', params, default=False) with ProgressContext(progress, user=user, title='Deleting resources', message='Calculating size...') as ctx: ctx.update(total=total) current = 0 for kind in resources: model = self._getResourceModel(kind, 'remove') for id in resources[kind]: if (isinstance(model, (acl_mixin.AccessControlMixin, AccessControlledModel))): doc = model.load(id=id, user=user, level=AccessType.ADMIN) else: doc = model.load(id=id) if not doc: raise RestException('Resource %s %s not found.' 
% (kind, id)) # Don't do a subtree count if we weren't asked for progress if progress: subtotal = model.subtreeCount(doc) if subtotal != 1: total += model.subtreeCount(doc)-1 ctx.update(total=total) model.remove(doc, progress=ctx) if progress: current += subtotal if ctx.progress['data']['current'] != current: ctx.update(current=current, message='Deleted ' + kind) delete.description = ( Description('Delete a set of items, folders, or other resources.') .param('resources', 'A JSON-encoded list of types to delete. Each ' 'type is a list of ids. For example: {"item": [(item id 1), ' '(item id2)], "folder": [(folder id 1)]}.') .param('progress', 'Whether to record progress on this task.', default=False, required=False, dataType='boolean') .errorResponse('Unsupport or unknown resource type.') .errorResponse('Invalid resources format.') .errorResponse('No resources specified.') .errorResponse('Resource not found.') .errorResponse('Admin access was denied for a resource.', 403)) @access.admin def getResource(self, id, params): model = self._getResourceModel(params['type']) if (isinstance(model, (acl_mixin.AccessControlMixin, AccessControlledModel))): user = self.getCurrentUser() return model.load(id=id, user=user, level=AccessType.READ) return model.load(id=id) getResource.description = ( Description('Get any resource by ID.') .param('id', 'The ID of the resource.', paramType='path') .param('type', 'The type of the resource (item, file, etc.).') .errorResponse('ID was invalid.') .errorResponse('Read access was denied for the resource.', 403)) def _prepareMoveOrCopy(self, params): user = self.getCurrentUser() resources = self._validateResourceSet(params, ('folder', 'item')) parentType = params['parentType'].lower() if parentType not in ('user', 'collection', 'folder'): raise RestException('Invalid parentType.') if ('item' in resources and len(resources['item']) > 0 and parentType != 'folder'): raise RestException('Invalid parentType.') parent = self.model(parentType).load( params['parentId'], level=AccessType.WRITE, user=user, exc=True) progress = self.boolParam('progress', params, default=False) return user, resources, parent, parentType, progress @access.user def moveResources(self, params): """ Move the specified resources to a new parent folder, user, or collection. Only folder and item resources can be moved with this function. """ user, resources, parent, parentType, progress = \ self._prepareMoveOrCopy(params) total = sum([len(resources[key]) for key in resources]) with ProgressContext(progress, user=user, title='Moving resources', message='Calculating requirements...', total=total) as ctx: for kind in resources: model = self._getResourceModel(kind, 'move') for id in resources[kind]: doc = model.load(id=id, user=user, level=AccessType.WRITE) if not doc: raise RestException('Resource %s %s not found.' % (kind, id)) ctx.update(message='Moving %s %s' % ( kind, doc.get('name', ''))) if kind == 'item': if parent['_id'] != doc['folderId']: model.move(doc, parent) elif kind == 'folder': if ((parentType, parent['_id']) != (doc['parentCollection'], doc['parentId'])): model.move(doc, parent, parentType) ctx.update(increment=1) moveResources.description = ( Description('Move a set of items and folders.') .param('resources', 'A JSON-encoded list of types to move. Each type ' 'is a list of ids. Only folders and items may be specified. 
' 'For example: {"item": [(item id 1), (item id2)], "folder": ' '[(folder id 1)]}.') .param('parentType', 'Parent type for the new parent of these ' 'resources.') .param('parentId', 'Parent ID for the new parent of these resources.') .param('progress', 'Whether to record progress on this task. Default ' 'is false.', required=False, dataType='boolean') .errorResponse('Unsupport or unknown resource type.') .errorResponse('Invalid resources format.') .errorResponse('Resource type not supported.') .errorResponse('No resources specified.') .errorResponse('Resource not found.') .errorResponse('ID was invalid.')) @access.user def copyResources(self, params): """ Copy the specified resources to a new parent folder, user, or collection. Only folder and item resources can be copied with this function. """ user, resources, parent, parentType, progress = \ self._prepareMoveOrCopy(params) total = len(resources.get('item', [])) if 'folder' in resources: model = self._getResourceModel('folder') for id in resources['folder']: folder = model.load(id=id, user=user, level=AccessType.READ) if folder: total += model.subtreeCount(folder) with ProgressContext(progress, user=user, title='Copying resources', message='Calculating requirements...', total=total) as ctx: for kind in resources: model = self._getResourceModel(kind) for id in resources[kind]: doc = model.load(id=id, user=user, level=AccessType.READ) if not doc: raise RestException('Resource not found. No %s with ' 'id %s' % (kind, id)) ctx.update(message='Copying %s %s' % ( kind, doc.get('name', ''))) if kind == 'item': model.copyItem(doc, folder=parent, creator=user) ctx.update(increment=1) elif kind == 'folder': model.copyFolder( doc, parent=parent, parentType=parentType, creator=user, progress=ctx) copyResources.description = ( Description('Copy a set of items and folders.') .param('resources', 'A JSON-encoded list of types to copy. Each type ' 'is a list of ids. Only folders and items may be specified. ' 'For example: {"item": [(item id 1), (item id2)], "folder": ' '[(folder id 1)]}.') .param('parentType', 'Parent type for the new parent of these ' 'resources.') .param('parentId', 'Parent ID for the new parent of these resources.') .param('progress', 'Whether to record progress on this task. Default ' 'is false.', required=False, dataType='boolean') .errorResponse('Unsupport or unknown resource type.') .errorResponse('Invalid resources format.') .errorResponse('Resource type not supported.') .errorResponse('No resources specified.') .errorResponse('Resource not found.') .errorResponse('ID was invalid.'))
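A client-side sketch of the download route defined above, using requests; the server URL, token, and ids are hypothetical placeholders, and the /api/v1 prefix and Girder-Token header are assumed Girder defaults.

import json
import requests

api = 'http://localhost:8080/api/v1'          # hypothetical server
headers = {'Girder-Token': '<auth token>'}    # assumed authentication header
resources = json.dumps({'item': ['<item id 1>'], 'folder': ['<folder id 1>']})

# Download the selected items and folders as a zip archive.
r = requests.get(api + '/resource/download',
                 params={'resources': resources, 'includeMetadata': 'true'},
                 headers=headers)
with open('Resources.zip', 'wb') as fh:
    fh.write(r.content)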
apache-2.0
4,512,090,057,241,301,500
44.482906
80
0.558677
false
twiindan/selenium_lessons
01_Introducción/02_tools_and_environment.py
1
2414
#=============================================================================== # - CPython is the real name of default standard Python implementation # - Implemented in C # - There are Python implementations in other languages: # - Jython: Python 2.5 interpreter written in Java which runs bytecode in the JVM # - IronPython: Similar approach for .NET Common Language Runtime # - JS, C++, CIL... # - Stackless Python: CPython fork with microthreads concurrency # - PyPy: Python 2.7 interpreter implemented in Python. #=============================================================================== #=============================================================================== # - IPython: create a comprehensive environment for interactive and exploratory computing # - An enhanced interactive Python shell #=============================================================================== #=============================================================================== # - virtualenv: a tool to create isolated Python environments. # - It simply changes your PATH environment var to point to a different folder #=============================================================================== #=============================================================================== # - PyPi: The Python Package Index is a repository of software for the Python programming language. # - There are currently 96678 packages here. # - The packages are 'eggs' #=============================================================================== #=============================================================================== # - pip: A tool for installing and managing Python packages # - It installs packages from PyPi, local folders or Git and other repositories # - It can read a list of packages from a file or generate the list of installed packages #=============================================================================== #=============================================================================== # - IDE? # - PyCharm (we have licenses, ask IT) # - Eclipse + Pydev # - NetBeans # - NINJA IDE # - Python's IDLE (not recommendable at all) # - ... # - Emacs # - Vi # - http://wiki.python.org/moin/IntegratedDevelopmentEnvironments # - Lots of good IDEs, it's up to you! #===============================================================================
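#===============================================================================
# - Typical environment setup, as illustrative shell commands (package names
#   below are just examples):
#     virtualenv venv                    # create an isolated environment
#     source venv/bin/activate           # activate it (Windows: venv\Scripts\activate)
#     pip install selenium ipython       # install packages from PyPI
#     pip freeze > requirements.txt      # record the installed packages
#     pip install -r requirements.txt    # reinstall the same list elsewhere
#===============================================================================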
apache-2.0
-6,505,159,516,177,435,000
44.566038
99
0.42792
false
wangyang59/tf_models
video_prediction/prediction_train_flo_learn_ip_sintel_test.py
1
30243
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # Copyright 2016 The TensorFlow Authors All Rights Reserved. # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Code for training the prediction model.""" import numpy as np import tensorflow as tf from tensorflow.python.platform import app from tensorflow.python.platform import flags from prediction_input_flo_sintel import build_tfrecord_input, DATA_DIR from prediction_model_flo_chair_ip import construct_model from visualize import plot_flo_learn_symm, plot_general from optical_flow_warp import transformer from optical_flow_warp_fwd import transformerFwd from optical_flow_warp_old import transformer_old import os # How often to record tensorboard summaries. SUMMARY_INTERVAL = 20 # How often to run a batch through the validation model. VAL_INTERVAL = 200 # How often to save a model checkpoint SAVE_INTERVAL = 500 FLAGS = flags.FLAGS flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.') flags.DEFINE_string('output_dir', "", 'directory for model checkpoints.') flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.') flags.DEFINE_string('pretrained_model', '', 'filepath of a pretrained model to initialize from.') flags.DEFINE_float('train_val_split', 0.95, 'The percentage of files to use for the training set,' ' vs. the validation set.') flags.DEFINE_integer('batch_size', 32, 'batch size for training') flags.DEFINE_float('learning_rate', 0.001, 'the base learning rate of the generator') flags.DEFINE_integer('num_gpus', 1, 'the number of gpu to use') def get_black_list(clses): blacklist = [] for cls in clses: fname = "/home/wangyang59/Data/ILSVRC2016/ImageSets/VID/train_%s.txt" % cls with open(fname) as f: content = f.readlines() blacklist += [x.split(" ")[0].split("/")[-1] + ".tfrecord" for x in content] return blacklist ## Helper functions def peak_signal_to_noise_ratio(true, pred): """Image quality metric based on maximal signal power vs. power of the noise. Args: true: the ground truth image. pred: the predicted image. Returns: peak signal to noise ratio (PSNR) """ return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0) def mean_squared_error(true, pred): """L2 distance between tensors true and pred. Args: true: the ground truth image. pred: the predicted image. Returns: mean squared error between ground truth and predicted image. """ return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred)) def mean_charb_error(true, pred, beta): return tf.reduce_sum(tf.sqrt((tf.square(beta*(true-pred)) + 0.001*0.001))) / tf.to_float(tf.size(pred)) def mean_charb_error_wmask(true, pred, mask, beta): return tf.reduce_sum(tf.sqrt((tf.square(beta*(true-pred)) + 0.001*0.001))*mask) / tf.to_float(tf.size(pred)) def weighted_mean_squared_error(true, pred, weight): """L2 distance between tensors true and pred. Args: true: the ground truth image. pred: the predicted image. Returns: mean squared error between ground truth and predicted image. 
""" tmp = tf.reduce_sum(weight*tf.square(true-pred), axis=[1,2], keep_dims=True) / tf.reduce_sum(weight, axis=[1, 2], keep_dims=True) return tf.reduce_mean(tmp) #return tf.reduce_sum(tf.square(true - pred)*weight) / tf.to_float(tf.size(pred)) #return tf.reduce_sum(tf.square(true - pred)*weight) / tf.reduce_sum(weight) def mean_L1_error(true, pred): """L2 distance between tensors true and pred. Args: true: the ground truth image. pred: the predicted image. Returns: mean squared error between ground truth and predicted image. """ return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred)) def weighted_mean_L1_error(true, pred, weight): """L2 distance between tensors true and pred. Args: true: the ground truth image. pred: the predicted image. Returns: mean squared error between ground truth and predicted image. """ return tf.reduce_sum(tf.abs(true - pred)*weight) / tf.to_float(tf.size(pred)) def gradient_x(img): gx = img[:,:,:-1,:] - img[:,:,1:,:] return gx def gradient_y(img): gy = img[:,:-1,:,:] - img[:,1:,:,:] return gy def cal_grad_error(flo, image, beta): """Calculate the gradient of the given image by calculate the difference between nearby pixels """ error = 0.0 img_grad_x = gradient_x(image) img_grad_y = gradient_y(image) weights_x = tf.exp(-10.0*tf.reduce_mean(tf.abs(img_grad_x), 3, keep_dims=True)) weights_y = tf.exp(-10.0*tf.reduce_mean(tf.abs(img_grad_y), 3, keep_dims=True)) error += weighted_mean_L1_error(flo[:, 1:, :, :], flo[:, :-1, :, :], weights_y*beta) error += weighted_mean_L1_error(flo[:, :, 1:, :], flo[:, :, :-1, :], weights_x*beta) #error += mean_charb_error_wmask(flo[:, 1:, :, :], flo[:, :-1, :, :], weights_y, beta) #error += mean_charb_error_wmask(flo[:, :, 1:, :], flo[:, :, :-1, :], weights_x, beta) return error / 2.0 def img_grad_error(true, pred, mask, beta): error = 0.0 error += mean_charb_error_wmask(true[:, 1:, :, :] - true[:, :-1, :, :], pred[:, 1:, :, :] - pred[:, :-1, :, :], mask[:, 1:, :, :], beta) error += mean_charb_error_wmask(true[:, :, 1:, :] - true[:, :, :-1, :], pred[:, :, 1:, :] - pred[:, :, :-1, :], mask[:, :, 1:, :], beta) return error / 2.0 def cal_epe(flo1, flo2): return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(flo1 - flo2), axis=3))) def blur(image): batch_size, img_height, img_width, color_channels = map(int, image.get_shape()[0:4]) kernel = np.array([1., 2., 1., 2., 4., 2., 1., 2., 1.], dtype=np.float32) / 16.0 kernel = kernel.reshape((3, 3, 1, 1)) kernel = tf.constant(kernel, shape=(3, 3, 1, 1), name='gaussian_kernel', verify_shape=True) blur_image = tf.nn.depthwise_conv2d(tf.pad(image, [[0,0], [1,1], [1,1],[0,0]], "SYMMETRIC"), tf.tile(kernel, [1, 1, color_channels, 1]), [1, 1, 1, 1], 'VALID') return blur_image def down_sample(image, to_blur=True): batch_size, img_height, img_width, color_channels = map(int, image.get_shape()[0:4]) if to_blur: image = blur(image) return tf.image.resize_bicubic(image, [img_height/2, img_width/2]) def get_pyrimad(image): image2 = down_sample(down_sample(image)) image3 = down_sample(image2) image4 = down_sample(image3) image5 = down_sample(image4) image6 = down_sample(image5) # image2 = tf.image.resize_area(image, [img_height/4, img_width/4]) # image3 = tf.image.resize_area(image, [img_height/8, img_width/8]) # image4 = tf.image.resize_area(image, [img_height/16, img_width/16]) # image5 = tf.image.resize_area(image, [img_height/32, img_width/32]) # image6 = tf.image.resize_area(image, [img_height/64, img_width/64]) return image2, image3, image4, image5, image6 def get_channel(image): zeros = 
tf.zeros_like(image) ones = tf.ones_like(image) #gray = 0.21*image[:, :, :, 0] + 0.72*image[:, :, :, 1] + 0.07*image[:, :, :, 2] channels = [] for i in range(10): channels.append(tf.where(tf.logical_and(image >= i/10.0, image < (i+1)/10.0), ones, zeros)) return tf.concat([image]+channels, axis=3) def average_gradients(tower_grads): """Calculate the average gradient for each shared variable across all towers. Note that this function provides a synchronization point across all towers. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over individual gradients. The inner list is over the gradient calculation for each tower. Returns: List of pairs of (gradient, variable) where the gradient has been averaged across all towers. """ average_grads = [] for grad_and_vars in zip(*tower_grads): # Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) grads = [] for g, _ in grad_and_vars: # Add 0 dimension to the gradients to represent the tower. expanded_g = tf.expand_dims(g, 0) # Append on a 'tower' dimension which we will average over below. grads.append(expanded_g) # Average over the 'tower' dimension. grad = tf.concat(axis=0, values=grads) grad = tf.reduce_mean(grad, 0) # Keep in mind that the Variables are redundant because they are shared # across towers. So .. we will just return the first tower's pointer to # the Variable. v = grad_and_vars[0][1] grad_and_var = (grad, v) average_grads.append(grad_and_var) return average_grads class Model(object): def __init__(self, image1=None, image2=None, true_flo=None, reuse_scope=False, scope=None, prefix="train"): #self.prefix = prefix = tf.placeholder(tf.string, []) self.iter_num = tf.placeholder(tf.float32, []) summaries = [] batch_size, H, W, color_channels = map(int, image1.get_shape()[0:4]) # if not reuse_scope: # image2_recon, feature2 = autoencoder(image2, trainable=False) # else: # If it's a validation or test model. # with tf.variable_scope(scope, reuse=True): # image2_recon, feature2 = autoencoder(image2, trainable=False) # # with tf.variable_scope(scope, reuse=True): # image1_recon, feature1 = autoencoder(image1, trainable=False) image1_pyrimad = get_pyrimad(get_channel(image1)) image2_pyrimad = get_pyrimad(get_channel(image2)) image1_2, image1_3, image1_4, image1_5, image1_6 = image1_pyrimad image2_2, image2_3, image2_4, image2_5, image2_6 = image2_pyrimad if not reuse_scope: flow2, flow3, flow4, flow5, flow6, image1_trans = construct_model(image1, image2, image1_pyrimad, image2_pyrimad) else: # If it's a validation or test model. 
with tf.variable_scope(scope, reuse=True): flow2, flow3, flow4, flow5, flow6, image1_trans = construct_model(image1, image2, image1_pyrimad, image2_pyrimad) with tf.variable_scope(scope, reuse=True): flow2r, flow3r, flow4r, flow5r, flow6r, _ = construct_model(image2, image1, image2_pyrimad, image1_pyrimad) occu_mask_6 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/64, W/64, 1], dtype='float32'), 20*flow6r/64.0, [H/64, W/64]), clip_value_min=0.0, clip_value_max=1.0) occu_mask_5 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/32, W/32, 1], dtype='float32'), 20*flow5r/32.0, [H/32, W/32]), clip_value_min=0.0, clip_value_max=1.0) occu_mask_4 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/16, W/16, 1], dtype='float32'), 20*flow4r/16.0, [H/16, W/16]), clip_value_min=0.0, clip_value_max=1.0) occu_mask_3 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/8, W/8, 1], dtype='float32'), 20*flow3r/8.0, [H/8, W/8]), clip_value_min=0.0, clip_value_max=1.0) occu_mask_2 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/4, W/4, 1], dtype='float32'), 20*flow2r/4.0, [H/4, W/4]), clip_value_min=0.0, clip_value_max=1.0) image1_2p, image1_3p, image1_4p, image1_5p, image1_6p = image1_trans loss6 = mean_charb_error_wmask(image1_6, image1_6p, occu_mask_6, 1.0) loss5 = mean_charb_error_wmask(image1_5, image1_5p, occu_mask_5, 1.0) loss4 = mean_charb_error_wmask(image1_4, image1_4p, occu_mask_4, 1.0) loss3 = mean_charb_error_wmask(image1_3, image1_3p, occu_mask_3, 1.0) loss2 = mean_charb_error_wmask(image1_2, image1_2p, occu_mask_2, 1.0) grad_error6 = cal_grad_error(flow6, image1_6[:,:,:,0:3], 1.0/64.0) grad_error5 = cal_grad_error(flow5, image1_5[:,:,:,0:3], 1.0/32.0) grad_error4 = cal_grad_error(flow4, image1_4[:,:,:,0:3], 1.0/16.0) grad_error3 = cal_grad_error(flow3, image1_3[:,:,:,0:3], 1.0/8.0) grad_error2 = cal_grad_error(flow2, image1_2[:,:,:,0:3], 1.0/4.0) img_grad_error6 = img_grad_error(image1_6p, image1_6, occu_mask_6, 1.0) img_grad_error5 = img_grad_error(image1_5p, image1_5, occu_mask_5, 1.0) img_grad_error4 = img_grad_error(image1_4p, image1_4, occu_mask_4, 1.0) img_grad_error3 = img_grad_error(image1_3p, image1_3, occu_mask_3, 1.0) img_grad_error2 = img_grad_error(image1_2p, image1_2, occu_mask_2, 1.0) # feature1_6_norm = tf.nn.l2_normalize(feature1[4], dim=3) # feature1_6p = transformer(tf.nn.l2_normalize(feature2[4], dim=3), 20*flow6/64.0, [H/64, W/64], feature1_6_norm) # loss6f = mean_charb_error_wmask(feature1_6_norm, feature1_6p, occu_mask_6, 10.0) # # feature1_5_norm = tf.nn.l2_normalize(feature1[3], dim=3) # feature1_5p = transformer(tf.nn.l2_normalize(feature2[3], dim=3), 20*flow5/32.0, [H/32, W/32], feature1_5_norm) # loss5f = mean_charb_error_wmask(feature1_5_norm, feature1_5p, occu_mask_5, 10.0) # # #feature1_5p = transformer_old(feature2[3], 20*flow5/32.0, [H/32, W/32]) # # with tf.variable_scope(scope, reuse=True): # # image1_recon = decoder(feature1_6p, reuse_scope=True, trainable=True) # #image1_recon2 = decoder(feature1_5p, reuse_scope=True, trainable=TruH=e, level=5) # # loss_ae = mean_charb_error(image1_recon, image1, 1.0) + mean_charb_error(image2, image2_recon, 1.0) + loss5f + loss6f # # summaries.append(tf.summary.scalar(prefix + '_loss_ae', loss_ae)) # summaries.append(tf.summary.scalar(prefix + '_loss6f', loss6f)) # summaries.append(tf.summary.scalar(prefix + '_loss5f', loss5f)) # loss = 0.05*(loss2+img_grad_error2) + 0.1*(loss3+img_grad_error3) + \ # 0.2*(loss4+img_grad_error4) + 0.8*(loss5+img_grad_error5) + 
3.2*(loss6+img_grad_error6) + \ # (0.05*grad_error2 + 0.1*grad_error3 + 0.2*grad_error4 + 0.0*grad_error5 + 0.0*grad_error6)*10.0 loss = 1.0*(loss2+img_grad_error2) + 1.0*(loss3+img_grad_error3) + \ 1.0*(loss4+img_grad_error4) + 1.0*(loss5+img_grad_error5) + 1.0*(loss6+img_grad_error6) + \ (1.0*grad_error2 + 1.0*grad_error3 + 1.0*grad_error4 + 1.0*grad_error5 + 1.0*grad_error6)*10.0 # loss = 3.2*(loss2+img_grad_error2) + 0.8*(loss3+img_grad_error3) + \ # 0.2*(loss4+img_grad_error4) + 0.1*(loss5+img_grad_error5) + 0.05*(loss6+img_grad_error6) + \ # (3.2*grad_error2 + 0.8*grad_error3 + 0.2*grad_error4 + 0.1*grad_error5 + 0.05*grad_error6)*10.0 self.loss = loss summaries.append(tf.summary.scalar(prefix + '_loss', self.loss)) summaries.append(tf.summary.scalar(prefix + '_loss2', loss2)) summaries.append(tf.summary.scalar(prefix + '_loss3', loss3)) summaries.append(tf.summary.scalar(prefix + '_loss4', loss4)) summaries.append(tf.summary.scalar(prefix + '_loss5', loss5)) summaries.append(tf.summary.scalar(prefix + '_loss6', loss6)) summaries.append(tf.summary.scalar(prefix + '_grad_loss2', grad_error2)) summaries.append(tf.summary.scalar(prefix + '_grad_loss3', grad_error3)) summaries.append(tf.summary.scalar(prefix + '_grad_loss4', grad_error4)) summaries.append(tf.summary.scalar(prefix + '_grad_loss5', grad_error5)) summaries.append(tf.summary.scalar(prefix + '_grad_loss6', grad_error6)) self.summ_op = tf.summary.merge(summaries) class Model_eval(object): def __init__(self, image1=None, image2=None, true_flo=None, true_occ_mask=None, scene=None, image_no=None, scope=None, prefix="eval"): #self.prefix = prefix = tf.placeholder(tf.string, []) self.iter_num = tf.placeholder(tf.float32, []) summaries = [] self.scene = scene self.image_no = image_no batch_size, H, W, color_channels = map(int, image1.get_shape()[0:4]) image1_pyrimad = get_pyrimad(get_channel(image1)) image2_pyrimad = get_pyrimad(get_channel(image2)) image1_2, image1_3, image1_4, image1_5, image1_6 = image1_pyrimad image2_2, image2_3, image2_4, image2_5, image2_6 = image2_pyrimad with tf.variable_scope(scope, reuse=True): flow2, flow3, flow4, flow5, flow6, image1_trans = construct_model(image1, image2, image1_pyrimad, image2_pyrimad) with tf.variable_scope(scope, reuse=True): flow2r, flow3r, flow4r, flow5r, flow6r, _ = construct_model(image2, image1, image2_pyrimad, image1_pyrimad) image1_2p, image1_3p, image1_4p, image1_5p, image1_6p = image1_trans occu_mask_2 = tf.clip_by_value(transformerFwd(tf.ones(shape=[batch_size, H/4, W/4, 1], dtype='float32'), 20*flow2r/4.0, [H/4, W/4]), clip_value_min=0.0, clip_value_max=1.0) # with tf.variable_scope(scope, reuse=True): # image2_recon, feature2 = autoencoder(image2, reuse_scope=True, trainable=False) # # feature1_6p = transformer_old(feature2[4], 20*flow6/64.0, [H/64, W/64]) # with tf.variable_scope(scope, reuse=True): # image1_recon = decoder(feature1_6p, reuse_scope=True, trainable=False) #feature1_5p = transformer_old(feature2[3], 20*flow5/32.0, [H/32, W/32]) #image1_recon2 = decoder(feature1_5p, reuse_scope=True, trainable=True, level=5) # loss_ae = mean_charb_error(image1, image1_recon, 1.0) + mean_charb_error(image2, image2_recon, 1.0) # self.image_ae = [image1, image2, image1_recon, image2_recon] # summaries.append(tf.summary.scalar(prefix + '_loss_ae', loss_ae)) true_flo_scale = tf.concat([true_flo[:,:,:,0:1], true_flo[:,:,:,1:2]/436.0*448.0], axis=3) self.orig_image1 = tf.image.resize_bicubic(image1, [436, 1024]) self.orig_image2 = tf.image.resize_bicubic(image2, [436, 1024]) 
self.true_flo = true_flo self.pred_flo = tf.image.resize_bicubic(20*tf.concat([flow2[:,:,:,0:1], flow2[:,:,:,1:2]/448.0*436.0], axis=3), [436, 1024]) self.true_warp = transformer_old(self.orig_image2, self.true_flo, [436, 1024]) self.pred_warp = transformer_old(self.orig_image2, self.pred_flo, [436, 1024]) self.pred_flo_r = tf.image.resize_bicubic(20*tf.concat([flow2r[:,:,:,0:1], flow2r[:,:,:,1:2]/448.0*436.0], axis=3), [436, 1024]) self.occu_mask = tf.image.resize_bicubic(occu_mask_2, [436, 1024]) self.occu_mask_test = 1.0 - true_occ_mask flow2_scale = tf.image.resize_bicubic(20*tf.concat([flow2[:,:,:,0:1], flow2[:,:,:,1:2]/448.0*436.0], axis=3), [436, 1024]) self.epe = cal_epe(true_flo, flow2_scale) self.epeInd = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(true_flo - flow2_scale), axis=3)), axis=[1, 2]) summaries.append(tf.summary.scalar(prefix + '_flo_loss', self.epe)) self.small_scales = [image1_4[:,:,:,0:3], image2_4[:,:,:,0:3], image1_4p[:,:,:,0:3], tf.image.resize_bicubic(true_flo_scale/16.0, [H/16, W/16]), 20*flow4/16.0, tf.image.resize_bicubic(true_flo_scale/16.0, [H/16, W/16])-20*flow4/16.0, image1_5[:,:,:,0:3], image2_5[:,:,:,0:3], image1_5p[:,:,:,0:3], tf.image.resize_bicubic(true_flo_scale/32.0, [H/32, W/32]), 20*flow5/32.0, tf.image.resize_bicubic(true_flo_scale/32.0, [H/32, W/32])-20*flow5/32.0, image1_6[:,:,:,0:3], image2_6[:,:,:,0:3], image1_6p[:,:,:,0:3], tf.image.resize_bicubic(true_flo_scale/64.0, [H/64, W/64]), 20*flow6/64.0, tf.image.resize_bicubic(true_flo_scale/64.0, [H/64, W/64])-20*flow6/64.0] self.occ_count = tf.reduce_mean(true_occ_mask) self.true_occ_mask = true_occ_mask self.occ_epe = cal_epe(true_flo*true_occ_mask, flow2_scale*true_occ_mask) self.nonocc_epe = cal_epe(true_flo*(1.0-true_occ_mask), flow2_scale*(1.0-true_occ_mask)) summaries.append(tf.summary.scalar(prefix + '_occ_count', self.occ_count)) summaries.append(tf.summary.scalar(prefix + '_occ_epe', self.occ_epe)) summaries.append(tf.summary.scalar(prefix + '_nonocc_epe', self.nonocc_epe)) self.summ_op = tf.summary.merge(summaries) def plot_all(model, itr, sess, feed_dict): orig_image1, true_flo, pred_flo, true_warp, pred_warp, pred_flo_r, occu_mask, occu_mask_test, small_scales = sess.run([model.orig_image1, model.true_flo, model.pred_flo, model.true_warp, model.pred_warp, model.pred_flo_r, model.occu_mask, model.occu_mask_test, model.small_scales], feed_dict) plot_flo_learn_symm(orig_image1, true_flo, pred_flo, true_warp, pred_warp, pred_flo_r, occu_mask, occu_mask_test, output_dir=FLAGS.output_dir, itr=itr) plot_general(small_scales, h=6, w=3, output_dir=FLAGS.output_dir, itr=itr, suffix="small") #plot_general(image_ae, h=2, w=2, output_dir=FLAGS.output_dir, itr=itr, suffix="ae") def main(unused_argv): if FLAGS.output_dir == "": raise Exception("OUT_DIR must be specified") if os.path.exists(FLAGS.output_dir): raise Exception("OUT_DIR already exist") print 'Constructing models and inputs.' 
with tf.Graph().as_default(), tf.device('/cpu:0'): train_op = tf.train.AdamOptimizer(FLAGS.learning_rate) tower_grads = [] itr_placeholders = [] image1, image2, flo, _= build_tfrecord_input(training=True) split_image1 = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image1) split_image2 = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image2) split_flo = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=flo) eval_image1, eval_image2, eval_flo, eval_occ_mask, scenes, image_no = build_tfrecord_input(training=False, num_epochs=1) summaries_cpu = tf.get_collection(tf.GraphKeys.SUMMARIES, tf.get_variable_scope().name) with tf.variable_scope(tf.get_variable_scope()) as vs: for i in xrange(FLAGS.num_gpus): with tf.device('/gpu:%d' % i): if i == FLAGS.num_gpus - 1: scopename = "model" else: scopename = '%s_%d' % ("tower", i) with tf.name_scope(scopename) as ns: if i == 0: model = Model(split_image1[i], split_image2[i], split_flo[i], reuse_scope=False, scope=vs) else: model = Model(split_image1[i], split_image2[i], split_flo[i], reuse_scope=True, scope=vs) loss = model.loss # Retain the summaries from the final tower. if i == FLAGS.num_gpus - 1: summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, ns) eval_model = Model_eval(eval_image1, eval_image2, eval_flo, eval_occ_mask, scenes, image_no, scope=vs) # Calculate the gradients for the batch of data on this CIFAR tower. grads = train_op.compute_gradients(loss) # Keep track of the gradients across all towers. tower_grads.append(grads) itr_placeholders.append(model.iter_num) # We must calculate the mean of each gradient. Note that this is the # synchronization point across all towers. grads = average_gradients(tower_grads) # Apply the gradients to adjust the shared variables. apply_gradient_op = train_op.apply_gradients(grads) # Create a saver. saver = tf.train.Saver( tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=5) # saver1 = tf.train.Saver( # list(set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))-set(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=".*ae.*"))), max_to_keep=5) # # saver2 = tf.train.Saver( # tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=".*ae.*"), max_to_keep=5) # Build the summary operation from the last tower summaries. summary_op = tf.summary.merge(summaries + summaries_cpu) # Make training session. 
sess = tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=False)) summary_writer = tf.summary.FileWriter( FLAGS.output_dir, graph=sess.graph, flush_secs=10) if FLAGS.pretrained_model: saver.restore(sess, FLAGS.pretrained_model) #saver2.restore(sess, "./tmp/flow_exp/flow_learn_chair_copy_ae_bal/model65002") #sess.run(tf.initialize_variables(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=".*ae.*"))) #start_itr = int(FLAGS.pretrained_model.split("/")[-1][5:]) start_itr = 0 sess.run(tf.local_variables_initializer()) else: sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) start_itr = 0 tf.train.start_queue_runners(sess) average_epe = tf.placeholder(tf.float32) average_epe_summary = tf.summary.scalar("model/eval_average_epe", average_epe) epes = [] average_occ_count = tf.placeholder(tf.float32) average_occ_count_summary = tf.summary.scalar("model/eval_average_occ_count", average_occ_count) occ_counts = [] average_epe_occ = tf.placeholder(tf.float32) average_epe_occ_summary = tf.summary.scalar("model/eval_average_epe_occ", average_epe_occ) epes_occ = [] average_epe_nonocc = tf.placeholder(tf.float32) average_epe_nonocc_summary = tf.summary.scalar("model/eval_average_epe_nonocc", average_epe_nonocc) epes_nonocc = [] # Run training. for itr in range(start_itr, FLAGS.num_iterations): # Generate new batch of data. feed_dict = {x:np.float32(itr) for x in itr_placeholders} eval_summary_str, epe, occ_count, occ_epe, nonocc_epe, \ orig_image1, orig_image2, true_flo, pred_flo, true_warp, pred_warp, \ pred_flo_r, occu_mask, occu_mask_test, small_scales, scene, image_no, epeInd, true_occ_mask = sess.run([eval_model.summ_op, eval_model.epe, eval_model.occ_count, eval_model.occ_epe, eval_model.nonocc_epe, eval_model.orig_image1, eval_model.orig_image2, eval_model.true_flo, eval_model.pred_flo, eval_model.true_warp, eval_model.pred_warp, eval_model.pred_flo_r, eval_model.occu_mask, eval_model.occu_mask_test, eval_model.small_scales, eval_model.scene, eval_model.image_no, eval_model.epeInd, eval_model.true_occ_mask]) idx = epeInd > 0.0 if np.sum(idx) > 0: ims1 = plot_general([orig_image1[idx], true_flo[idx], pred_flo[idx], occu_mask_test[idx], occu_mask[idx]], h=1, w=5, output_dir=FLAGS.output_dir, itr=itr, get_im=True) #ims2 = plot_general([tmp[idx] for tmp in small_scales], h=6, w=3, output_dir=FLAGS.output_dir, itr=itr, suffix="small", get_im=True) scene = scene[idx] image_no = image_no[idx] for i in range(np.sum(idx)): if not os.path.exists(os.path.join(FLAGS.output_dir, scene[i][0])): os.makedirs(os.path.join(FLAGS.output_dir, scene[i][0])) ims1[i].save(os.path.join(FLAGS.output_dir, scene[i][0], image_no[i][0] + ".jpeg")) #ims2[i].save(os.path.join(FLAGS.output_dir, scene[i][0], image_no[i][0] + "_small.jpeg")) epes.append(epe) occ_counts.append(occ_count) epes_occ.append(occ_epe) epes_nonocc.append(nonocc_epe) print(sum(epes)/len(epes)) feed = {average_epe: sum(epes)/len(epes)} epe_summary_str = sess.run(average_epe_summary, feed_dict=feed) feed = {average_occ_count: sum(occ_counts)/len(occ_counts)} epe_tier1_summary_str = sess.run(average_occ_count_summary, feed_dict=feed) feed = {average_epe_occ: sum(epes_occ)/len(epes_occ)} epe_tier2_summary_str = sess.run(average_epe_occ_summary, feed_dict=feed) feed = {average_epe_nonocc: sum(epes_nonocc)/len(epes_nonocc)} epe_tier3_summary_str = sess.run(average_epe_nonocc_summary, feed_dict=feed) summary_writer.add_summary(eval_summary_str, itr) summary_writer.add_summary(epe_summary_str, 
itr) summary_writer.add_summary(epe_tier1_summary_str, itr) summary_writer.add_summary(epe_tier2_summary_str, itr) summary_writer.add_summary(epe_tier3_summary_str, itr) if __name__ == '__main__': app.run()
apache-2.0
5,854,912,422,827,126,000
44.753404
173
0.606289
false
andrewsomething/digitalocean-indicator
digitalocean_indicator/__init__.py
1
1487
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE

import optparse

from locale import gettext as _

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk  # pylint: disable=E0611

from digitalocean_indicator import DoIndicator
from digitalocean_indicator_lib import set_up_logging, get_version


def parse_options():
    """Support for command line options"""
    parser = optparse.OptionParser(version="%%prog %s" % get_version())
    parser.add_option(
        "-v", "--verbose", action="count", dest="verbose",
        help="Show debug messages")
    (options, args) = parser.parse_args()
    set_up_logging(options)


def main():
    'constructor for your class instances'
    parse_options()

    # Run the application.
    do_indicator = DoIndicator.Indicator()
    Gtk.main()
gpl-3.0
8,986,540,471,270,268,000
31.326087
75
0.702757
false
vandorjw/django-template-project
project/project_name/views.py
1
1288
from django.views.generic import TemplateView
from django.views.generic.edit import FormView

from blog.models import Article

from django.core.mail import send_mail

from {{project_name}}.forms import ContactForm


class HomePageView(TemplateView):
    template_name = "index.html"

    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)
        context['top4articles'] = Article.objects.filter(is_active=True)[:4]
        return context


class ContactPageView(FormView):
    template_name = "contact.html"
    form_class = ContactForm
    success_url = '/contact/'

    def form_valid(self, form):
        subject = form.cleaned_data['subject']
        message = form.cleaned_data['message']
        sender = form.cleaned_data['sender']
        recipients = ('CHANGE_ME@{{ project_name }}.com',)
        send_mail(subject, message, sender, recipients)
        # Pass the validated form (not self) through to FormView.form_valid.
        return super(ContactPageView, self).form_valid(form)


class RobotPageView(TemplateView):
    template_name = "robots.txt"
    content_type = 'text/plain'


class HumanPageView(TemplateView):
    template_name = "humans.txt"
    content_type = 'text/plain'


# class GooglePageView(TemplateView):
#     template_name = "googleXXXXXXXXXXX.html"
#     content_type = 'text/plain'
mit
2,950,621,827,330,681,000
32.025641
76
0.697205
false
a25kk/aha
src/aha.sitecontent/aha/sitecontent/browser/pagesection.py
1
7060
# -*- coding: utf-8 -*- """Module providing views for a contentpage section""" from AccessControl import Unauthorized from Acquisition import aq_inner from Acquisition import aq_parent from plone import api from plone.api.exc import InvalidParameterError from plone.protect.utils import addTokenToUrl from Products.CMFPlone.utils import safe_unicode from Products.Five.browser import BrowserView from zope.component import getMultiAdapter from zope.component import getUtility from aha.sitecontent.mailer import create_plaintext_message from aha.sitecontent.mailer import prepare_email_message from aha.sitecontent.mailer import get_mail_template from aha.sitecontent.mailer import send_mail from aha.sitecontent.interfaces import IResponsiveImagesTool from aha.sitecontent import _ class PageSectionView(BrowserView): """ Page Section default view """ def __call__(self): return self.render() def render(self): return self.index() def parent_page(self): return aq_parent(aq_inner(self.context)) def rendered_page_snippet(self): context = aq_inner(self.context) snippet = context.restrictedTraverse('@@pagesection-snippet')() if context.displayInquiryForm: snippet = context.restrictedTraverse('@@page-section-form')() return snippet class PageSectionSnippet(BrowserView): """ Embeddable section content snippet """ def field_has_data(self, fieldname): """ Check wether a given schema key returns a value""" context = aq_inner(self.context) try: video_link = getattr(context, fieldname, None) except AttributeError: video_link = None if video_link is not None: return True return False def has_video_link(self): return self.field_has_data('videoLink') def has_external_image(self): return self.field_has_data('externalImage') def show_image(self): display = True if self.has_video_link() or self.has_external_image(): display = False return display def get_image_data(self, uuid): tool = getUtility(IResponsiveImagesTool) return tool.create(uuid) class PageSectionForm(BrowserView): """ Embeddable section content snippet including inquiry form """ def __call__(self): return self.render() def update(self): self.errors = {} unwanted = ('_authenticator', 'form.button.Submit') required = ['email'] required_boolean = ('privacy-policy-agreement', 'privacy-policy') if 'form.button.Submit' in self.request: authenticator = getMultiAdapter((self.context, self.request), name=u"authenticator") if not authenticator.verify(): raise Unauthorized form = self.request.form form_data = {} form_errors = {} error_idx = 0 if self.privacy_policy_enabled(): for field_name in required_boolean: if not field_name in form: form_errors[field_name] = self.required_field_error() error_idx += 1 for value in form: if value not in unwanted: form_data[value] = safe_unicode(form[value]) if not form[value] and value in required: form_errors[value] = self.required_field_error() error_idx += 1 else: error = { 'active': False, 'msg': form[value] } form_errors[value] = error if error_idx > 0: self.errors = form_errors else: self.send_inquiry(form) def render(self): self.update() return self.index() def default_value(self, error): value = '' if error['active'] is False: value = error['msg'] return value @staticmethod def required_field_error(): translation_service = api.portal.get_tool(name="translation_service") error = {} error_msg = _(u"This field is required") error['active'] = True error['msg'] = translation_service.translate( error_msg, 'aha.sitecontent', target_language=api.portal.get_default_language() ) return error @staticmethod def 
privacy_policy_enabled(): return True @staticmethod def privacy_policy_url(): portal = api.portal.get() portal_url = portal.absolute_url() url = '{0}/raum/datenschutzerklaerung'.format(portal_url) return url def send_inquiry(self, data): context = aq_inner(self.context) subject = _(u"Inquiry from website visitor") email_subject = api.portal.translate( "Inquiry from website visitor", 'aha.sitecontent', api.portal.get_current_language()) data['subject'] = email_subject mail_tpl = self._compose_message(data) mail_plain = create_plaintext_message(mail_tpl) msg = prepare_email_message(mail_tpl, mail_plain) recipients = ['[email protected]', ] send_mail( msg, recipients, email_subject ) context_parent = aq_parent(context) next_url = '{0}/@@inquiry-form-dispatched/'.format( context_parent.absolute_url() ) url = addTokenToUrl(next_url) return self.request.response.redirect(url) def _compose_message(self, data): portal = api.portal.get() portal_url = portal.absolute_url() template_vars = { 'email': data['email'], 'subject': str(data['subject']), 'fullname': data['fullname'], 'phone': data['phone'], 'message': data['comment'], 'url': portal_url } template_name = 'inquiry-mail.html' message = get_mail_template(template_name, template_vars) return message def field_has_data(self, fieldname): """ Check wether a given schema key returns a value""" context = aq_inner(self.context) try: video_link = getattr(context, fieldname, None) except AttributeError: video_link = None if video_link is not None: return True return False def has_video_link(self): return self.field_has_data('videoLink') def has_external_image(self): return self.field_has_data('externalImage') def show_image(self): display = True if self.has_video_link() or self.has_external_image(): display = False return display def get_image_data(self, uuid): tool = getUtility(IResponsiveImagesTool) return tool.create(uuid)
mit
-960,660,328,584,240,100
31.837209
77
0.585694
false
dylanninin/schema
test_schema.py
1
18858
from __future__ import with_statement from collections import defaultdict, namedtuple from operator import methodcaller import os from pytest import raises from schema import Schema, Use, And, Or, Optional, SchemaError, JSONSchema try: basestring except NameError: basestring = str # Python 3 does not have basestring SE = raises(SchemaError) def ve(_): raise ValueError() def se(_): raise SchemaError('first auto', 'first error') def test_schema(): assert Schema(1).validate(1) == 1 with SE: Schema(1).validate(9) assert Schema(int).validate(1) == 1 with SE: Schema(int).validate('1') assert Schema(Use(int)).validate('1') == 1 with SE: Schema(int).validate(int) assert Schema(str).validate('hai') == 'hai' with SE: Schema(str).validate(1) assert Schema(Use(str)).validate(1) == '1' assert Schema(list).validate(['a', 1]) == ['a', 1] assert Schema(dict).validate({'a': 1}) == {'a': 1} with SE: Schema(dict).validate(['a', 1]) assert Schema(lambda n: 0 < n < 5).validate(3) == 3 with SE: Schema(lambda n: 0 < n < 5).validate(-1) def test_validate_file(): assert Schema( Use(open)).validate('LICENSE-MIT').read().startswith('Copyright') with SE: Schema(Use(open)).validate('NON-EXISTENT') assert Schema(os.path.exists).validate('.') == '.' with SE: Schema(os.path.exists).validate('./non-existent/') assert Schema(os.path.isfile).validate('LICENSE-MIT') == 'LICENSE-MIT' with SE: Schema(os.path.isfile).validate('NON-EXISTENT') def test_and(): assert And(int, lambda n: 0 < n < 5).validate(3) == 3 with SE: And(int, lambda n: 0 < n < 5).validate(3.33) assert And(Use(int), lambda n: 0 < n < 5).validate(3.33) == 3 with SE: And(Use(int), lambda n: 0 < n < 5).validate('3.33') def test_or(): assert Or(int, dict).validate(5) == 5 assert Or(int, dict).validate({}) == {} with SE: Or(int, dict).validate('hai') assert Or(int).validate(4) with SE: Or().validate(2) def test_validate_list(): assert Schema([1, 0]).validate([1, 0, 1, 1]) == [1, 0, 1, 1] assert Schema([1, 0]).validate([]) == [] with SE: Schema([1, 0]).validate(0) with SE: Schema([1, 0]).validate([2]) assert And([1, 0], lambda l: len(l) > 2).validate([0, 1, 0]) == [0, 1, 0] with SE: And([1, 0], lambda l: len(l) > 2).validate([0, 1]) def test_list_tuple_set_frozenset(): assert Schema([int]).validate([1, 2]) with SE: Schema([int]).validate(['1', 2]) assert Schema(set([int])).validate(set([1, 2])) == set([1, 2]) with SE: Schema(set([int])).validate([1, 2]) # not a set with SE: Schema(set([int])).validate(['1', 2]) assert Schema(tuple([int])).validate(tuple([1, 2])) == tuple([1, 2]) with SE: Schema(tuple([int])).validate([1, 2]) # not a set def test_strictly(): assert Schema(int).validate(1) == 1 with SE: Schema(int).validate('1') def test_dict(): assert Schema({'key': 5}).validate({'key': 5}) == {'key': 5} with SE: Schema({'key': 5}).validate({'key': 'x'}) with SE: Schema({'key': 5}).validate(['key', 5]) assert Schema({'key': int}).validate({'key': 5}) == {'key': 5} assert Schema({'n': int, 'f': float}).validate( {'n': 5, 'f': 3.14}) == {'n': 5, 'f': 3.14} with SE: Schema({'n': int, 'f': float}).validate( {'n': 3.14, 'f': 5}) with SE: try: Schema({}).validate({'abc': None, 1: None}) except SchemaError as e: assert e.args[0].startswith("Wrong keys 'abc', 1 in") raise with SE: try: Schema({'key': 5}).validate({}) except SchemaError as e: assert e.args[0] == "Missing keys: 'key'" raise with SE: try: Schema({'key': 5}).validate({'n': 5}) except SchemaError as e: assert e.args[0] == "Missing keys: 'key'" raise with SE: try: Schema({}).validate({'n': 5}) except SchemaError 
as e: assert e.args[0] == "Wrong keys 'n' in {'n': 5}" raise with SE: try: Schema({'key': 5}).validate({'key': 5, 'bad': 5}) except SchemaError as e: assert e.args[0] in ["Wrong keys 'bad' in {'key': 5, 'bad': 5}", "Wrong keys 'bad' in {'bad': 5, 'key': 5}"] raise with SE: try: Schema({}).validate({'a': 5, 'b': 5}) except SchemaError as e: assert e.args[0] in ["Wrong keys 'a', 'b' in {'a': 5, 'b': 5}", "Wrong keys 'a', 'b' in {'b': 5, 'a': 5}"] raise def test_dict_keys(): assert Schema({str: int}).validate( {'a': 1, 'b': 2}) == {'a': 1, 'b': 2} with SE: Schema({str: int}).validate({1: 1, 'b': 2}) assert Schema({Use(str): Use(int)}).validate( {1: 3.14, 3.14: 1}) == {'1': 3, '3.14': 1} def test_dict_optional_keys(): with SE: Schema({'a': 1, 'b': 2}).validate({'a': 1}) assert Schema({'a': 1, Optional('b'): 2}).validate({'a': 1}) == {'a': 1} assert Schema({'a': 1, Optional('b'): 2}).validate( {'a': 1, 'b': 2}) == {'a': 1, 'b': 2} # Make sure Optionals are favored over types: assert Schema({basestring: 1, Optional('b'): 2}).validate({'a': 1, 'b': 2}) == {'a': 1, 'b': 2} def test_dict_optional_defaults(): # Optionals fill out their defaults: assert Schema({Optional('a', default=1): 11, Optional('b', default=2): 22}).validate({'a': 11}) == {'a': 11, 'b': 2} # Optionals take precedence over types. Here, the "a" is served by the # Optional: assert Schema({Optional('a', default=1): 11, basestring: 22}).validate({'b': 22}) == {'a': 1, 'b': 22} with raises(TypeError): Optional(And(str, Use(int)), default=7) def test_dict_subtypes(): d = defaultdict(int, key=1) v = Schema({'key': 1}).validate(d) assert v == d assert isinstance(v, defaultdict) # Please add tests for Counter and OrderedDict once support for Python2.6 # is dropped! def test_complex(): s = Schema({'<file>': And([Use(open)], lambda l: len(l)), '<path>': os.path.exists, Optional('--count'): And(int, lambda n: 0 <= n <= 5)}) data = s.validate({'<file>': ['./LICENSE-MIT'], '<path>': './'}) assert len(data) == 2 assert len(data['<file>']) == 1 assert data['<file>'][0].read().startswith('Copyright') assert data['<path>'] == './' def test_nice_errors(): try: Schema(int, error='should be integer').validate('x') except SchemaError as e: assert e.errors == ['should be integer'] try: Schema(Use(float), error='should be a number').validate('x') except SchemaError as e: assert e.code == 'should be a number' try: Schema({Optional('i'): Use(int, error='should be a number')}).validate({'i': 'x'}) except SchemaError as e: assert e.code == 'should be a number' def test_use_error_handling(): try: Use(ve).validate('x') except SchemaError as e: assert e.autos == ["ve('x') raised ValueError()"] assert e.errors == [None] try: Use(ve, error='should not raise').validate('x') except SchemaError as e: assert e.autos == ["ve('x') raised ValueError()"] assert e.errors == ['should not raise'] try: Use(se).validate('x') except SchemaError as e: assert e.autos == [None, 'first auto'] assert e.errors == [None, 'first error'] try: Use(se, error='second error').validate('x') except SchemaError as e: assert e.autos == [None, 'first auto'] assert e.errors == ['second error', 'first error'] def test_or_error_handling(): try: Or(ve).validate('x') except SchemaError as e: assert e.autos[0].startswith('Or(') assert e.autos[0].endswith(") did not validate 'x'") assert e.autos[1] == "ve('x') raised ValueError()" assert len(e.autos) == 2 assert e.errors == [None, None] try: Or(ve, error='should not raise').validate('x') except SchemaError as e: assert e.autos[0].startswith('Or(') 
assert e.autos[0].endswith(") did not validate 'x'") assert e.autos[1] == "ve('x') raised ValueError()" assert len(e.autos) == 2 assert e.errors == ['should not raise', 'should not raise'] try: Or('o').validate('x') except SchemaError as e: assert e.autos == ["Or('o') did not validate 'x'", "'o' does not match 'x'"] assert e.errors == [None, None] try: Or('o', error='second error').validate('x') except SchemaError as e: assert e.autos == ["Or('o') did not validate 'x'", "'o' does not match 'x'"] assert e.errors == ['second error', 'second error'] def test_and_error_handling(): try: And(ve).validate('x') except SchemaError as e: assert e.autos == ["ve('x') raised ValueError()"] assert e.errors == [None] try: And(ve, error='should not raise').validate('x') except SchemaError as e: assert e.autos == ["ve('x') raised ValueError()"] assert e.errors == ['should not raise'] try: And(str, se).validate('x') except SchemaError as e: assert e.autos == [None, 'first auto'] assert e.errors == [None, 'first error'] try: And(str, se, error='second error').validate('x') except SchemaError as e: assert e.autos == [None, 'first auto'] assert e.errors == ['second error', 'first error'] def test_schema_error_handling(): try: Schema(Use(ve)).validate('x') except SchemaError as e: assert e.autos == [None, "ve('x') raised ValueError()"] assert e.errors == [None, None] try: Schema(Use(ve), error='should not raise').validate('x') except SchemaError as e: assert e.autos == [None, "ve('x') raised ValueError()"] assert e.errors == ['should not raise', None] try: Schema(Use(se)).validate('x') except SchemaError as e: assert e.autos == [None, None, 'first auto'] assert e.errors == [None, None, 'first error'] try: Schema(Use(se), error='second error').validate('x') except SchemaError as e: assert e.autos == [None, None, 'first auto'] assert e.errors == ['second error', None, 'first error'] def test_use_json(): import json gist_schema = Schema(And(Use(json.loads), # first convert from JSON {Optional('description'): basestring, 'public': bool, 'files': {basestring: {'content': basestring}}})) gist = '''{"description": "the description for this gist", "public": true, "files": { "file1.txt": {"content": "String file contents"}, "other.txt": {"content": "Another file contents"}}}''' assert gist_schema.validate(gist) def test_error_reporting(): s = Schema({'<files>': [Use(open, error='<files> should be readable')], '<path>': And(os.path.exists, error='<path> should exist'), '--count': Or(None, And(Use(int), lambda n: 0 < n < 5), error='--count should be integer 0 < n < 5')}, error='Error:') s.validate({'<files>': [], '<path>': './', '--count': 3}) try: s.validate({'<files>': [], '<path>': './', '--count': '10'}) except SchemaError as e: assert e.code == 'Error:\n--count should be integer 0 < n < 5' try: s.validate({'<files>': [], '<path>': './hai', '--count': '2'}) except SchemaError as e: assert e.code == 'Error:\n<path> should exist' try: s.validate({'<files>': ['hai'], '<path>': './', '--count': '2'}) except SchemaError as e: assert e.code == 'Error:\n<files> should be readable' def test_schema_repr(): # what about repr with `error`s? 
schema = Schema([Or(None, And(str, Use(float)))]) repr_ = "Schema([Or(None, And(<type 'str'>, Use(<type 'float'>)))])" # in Python 3 repr contains <class 'str'>, not <type 'str'> assert repr(schema).replace('class', 'type') == repr_ def test_validate_object(): schema = Schema({object: str}) assert schema.validate({42: 'str'}) == {42: 'str'} with SE: schema.validate({42: 777}) def test_issue_9_prioritized_key_comparison(): validate = Schema({'key': 42, object: 42}).validate assert validate({'key': 42, 777: 42}) == {'key': 42, 777: 42} def test_issue_9_prioritized_key_comparison_in_dicts(): # http://stackoverflow.com/questions/14588098/docopt-schema-validation s = Schema({'ID': Use(int, error='ID should be an int'), 'FILE': Or(None, Use(open, error='FILE should be readable')), Optional(str): object}) data = {'ID': 10, 'FILE': None, 'other': 'other', 'other2': 'other2'} assert s.validate(data) == data data = {'ID': 10, 'FILE': None} assert s.validate(data) == data def test_missing_keys_exception_with_non_str_dict_keys(): s = Schema({And(str, Use(str.lower), 'name'): And(str, len)}) with SE: s.validate(dict()) with SE: try: Schema({1: 'x'}).validate(dict()) except SchemaError as e: assert e.args[0] == "Missing keys: 1" raise def test_issue_56_cant_rely_on_callables_to_have_name(): s = Schema(methodcaller('endswith', '.csv')) assert s.validate('test.csv') == 'test.csv' with SE: try: s.validate('test.py') except SchemaError as e: assert "operator.methodcaller" in e.args[0] raise def test_exception_handling_with_bad_validators(): BadValidator = namedtuple("BadValidator", ["validate"]) s = Schema(BadValidator("haha")) with SE: try: s.validate("test") except SchemaError as e: assert "TypeError" in e.args[0] raise def test_issue_83_iterable_validation_return_type(): TestSetType = type("TestSetType", (set,), dict()) data = TestSetType(["test", "strings"]) s = Schema(set([str])) assert isinstance(s.validate(data), TestSetType) def test_optional_key_convert_failed_randomly_while_with_another_optional_object(): """ In this test, created_at string "2015-10-10 00:00:00" is expected to be converted to a datetime instance. 
- it works when the schema is s = Schema({ 'created_at': _datetime_validator, Optional(basestring): object, }) - but when wrapping the key 'created_at' with Optional, it fails randomly :return: """ import datetime fmt = '%Y-%m-%d %H:%M:%S' _datetime_validator = Or(None, Use(lambda i: datetime.datetime.strptime(i, fmt))) # FIXME given tests enough for i in range(1024): s = Schema({ Optional('created_at'): _datetime_validator, Optional('updated_at'): _datetime_validator, Optional('birth'): _datetime_validator, Optional(basestring): object, }) data = { 'created_at': '2015-10-10 00:00:00' } validated_data = s.validate(data) # is expected to be converted to a datetime instance, but fails randomly # (most of the time) assert isinstance(validated_data['created_at'], datetime.datetime) # assert isinstance(validated_data['created_at'], basestring) def test_json_schema(): assert JSONSchema(1, 1).validate().data == 1 assert JSONSchema(int, 1).validate().data == 1 assert JSONSchema(int, '1').validate().data is None assert JSONSchema(int, int).validate().data is None assert JSONSchema(str, 'hai').validate().data == 'hai' assert JSONSchema(str, 1).validate().data is None assert JSONSchema(Use(str), 1).validate().data == '1' assert JSONSchema(list, ['a', 1]).validate().data == ['a', 1] assert JSONSchema(dict, {'a': 1}).validate().data == {'a': 1} assert JSONSchema(dict, ['a', 1]).validate().data is None # TODO lambda # assert JSONSchema(lambda n: 0 < n < 5, 3).validate().data == 3 def test_json_schema_errors_with_int(): js = JSONSchema(int, 0.1).validate() assert js.data is None assert js.valid is False assert js.errors == '0.1 is not a valid int' js = JSONSchema(int, 'number').validate() assert js.data is None assert js.valid is False assert js.errors == 'number is not a valid int' def test_json_schema_errors_with_str(): js = JSONSchema(str, 1).validate() assert js.data is None assert js.valid is False assert js.errors == '1 is not a valid str' def test_json_schema_errors_with_bool(): js = JSONSchema(bool, 1).validate() assert js.data is None assert js.valid is False assert js.errors == '1 is not a valid bool' def test_json_schema_errors_with_dict(): js = JSONSchema(dict, 1).validate() assert js.data is None assert js.valid is False assert js.errors == '1 is not a valid dict' js = JSONSchema(dict, None).validate() assert js.data is None assert js.valid is False assert js.errors == 'None is not a valid dict' js = JSONSchema(dict, (0,)).validate() assert js.data is None assert js.valid is False assert js.errors == '(0,) is not a valid dict' js = JSONSchema(dict, dict).validate() assert js.data is None assert js.valid is False assert js.errors == "<type 'dict'> is not a valid json" js = JSONSchema(dict, object).validate() assert js.data is None assert js.valid is False assert js.errors == "<type 'object'> is not a valid json" js = JSONSchema(dict, {'1', '2'}).validate() assert js.data is None assert js.valid is False assert js.errors == "set(['1', '2']) is not a valid dict" js = JSONSchema({'name': str, 'age': lambda n: 18 <= n <= 99}, {'name': 'Sue', 'age': 100}).validate() assert js.data == {'name': 'Sue'} assert js.valid is False assert js.errors == {'age': '<lambda>(100) should evaluate to True'} # TODO object value # js = JSONSchema({str: int, int: None}, {'key1': 1, 'key2': 2, 10: None, 20: None}).validate() # assert js.data == {'key1': 1, 'key2': 2} # assert js.valid is False def test_json_schema_errors_with_list(): js = JSONSchema(list, 1).validate() assert js.data is None assert js.valid is False 
assert js.errors == '1 is not a valid list'
mit
-8,319,315,173,465,126,000
33.039711
99
0.561724
false
arashn/senior-project
server/create_mission.py
1
2187
# This is a script to create a mission from a start
# location to an end location and upload the mission
# to the vehicle. The script uses the Google Maps
# Directions API to obtain directions from the start
# location to the end location, and uses the points
# received as waypoints in the mission.

import sys  # used below to read the start/end locations from the command line

import googlemaps
from polyline.codec import PolylineCodec

from dronekit import connect, VehicleMode, Command
from pymavlink import mavutil

gmaps = googlemaps.Client(key='AIzaSyBj8RNUHUSuk78N2Jim9yrMAKjWvh6gc_g')

vehicle = connect('/dev/ttyUSB0', baud=57600, wait_ready=True)
print "Drone is ready"


def get_directions(start_location, end_location):
    directions_result = gmaps.directions(start_location, end_location, mode="walking")
    print directions_result
    print "Coordinates:"
    directions = []
    start = directions_result[0]['legs'][0]['steps'][0]['start_location']
    directions.append((start['lat'], start['lng']))
    # Collect every decoded polyline point of each step, then the step's end point.
    for step in directions_result[0]['legs'][0]['steps']:
        poly = PolylineCodec().decode(step['polyline']['points'])
        for point in poly:
            directions.append(point)
        end = step['end_location']
        directions.append((end['lat'], end['lng']))
    for x in directions:
        print x
    return directions


def create_mission(directions):
    cmds = vehicle.commands
    cmds.clear()
    cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, 0, 0, 0, 0, 0, 0, 0, 0, 5))
    for point in directions:
        lat = float(point[0])
        lon = float(point[1])
        cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 1, 0, 0, 0, lat, lon, 5))
    cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_LOITER_TIME, 0, 0, 10, 0, 0, 0, 0, 0, 5))
    cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_LAND, 0, 0, 0, 0, 0, 0, 0, 0, 0))
    cmds.upload()


start_location = sys.argv[1]
end_location = sys.argv[2]
directions = get_directions(start_location, end_location)
create_mission(directions)
gpl-3.0
8,425,306,509,460,994,000
39.5
150
0.692273
false
j08lue/poppy
poppy/ts_flux_budget.py
1
6682
import numpy as np import warnings from oceanpy.fluxbudget import budget_over_region_2D from oceanpy.stats import central_differences def _fill0(a): return np.ma.filled(a,0.) def _warn_virtual_salt_flux_units(): warnings.warn('Output units are kg SALT s-1!',) warnings.filterwarnings("once") def fluxbudget_VVEL(ds,mask,varn,kza=0,kzo=None,S0=34.8,t=0): """Integrate horizontal flux using VVEL*SCALAR""" _warn_virtual_salt_flux_units() dsvar = ds.variables dxu = dsvar['DXU'][:] * 1e-2 dyu = dsvar['DYU'][:] * 1e-2 dz = dsvar['dz'][:] * 1e-2 if kzo is None: kzo = len(dz) fluxbudget = 0. for k in range(kza,kzo): uflux = _fill0(dsvar['UVEL'][t,k]) * 1e-2 uflux *= dyu uflux *= dz[k] vflux = _fill0(dsvar['VVEL'][t,k]) * 1e-2 vflux *= dxu vflux *= dz[k] if not varn: scalar = None elif varn == 'heat': scalar = _fill0(dsvar['TEMP'][t,k]) elif varn == 'salt': scalar = _fill0(dsvar['SALT'][t,k]) elif varn == 'freshwater': scalar = (S0 - _fill0(dsvar['SALT'][t,k])) / S0 fluxbudget += budget_over_region_2D(uflux,vflux,scalar=scalar,mask=mask,grid='ArakawaB') if varn == 'heat': fluxbudget *= (1e3 * 4e3 * 1e-15) # PW return fluxbudget def fluxbudget_UESVNS(ds,mask,varn='salt',kza=0,kzo=None,t=0): """Integrate horizontal flux using UES and VNS variables""" _warn_virtual_salt_flux_units() dsvar = ds.variables dz = dsvar['dz'][:] * 1e-2 tarea = dsvar['UAREA'][:] * 1e-4 if kzo is None: kzo = len(dz) fluxbudget = 0. for k in range(kza,kzo): uflux = _fill0(dsvar['UES'][t,k]) uflux *= tarea uflux *= dz[k] vflux = _fill0(dsvar['VNS'][t,k]) vflux *= tarea vflux *= dz[k] fluxbudget += budget_over_region_2D(uflux,vflux,scalar=None,mask=mask) return fluxbudget def fluxbudget_bolus_visop(ds,mask,varn,kza=0,kzo=None,S0=34.8,t=0): """Compute flux of `varn` into region `mask` due to eddy (bolus) velocity""" _warn_virtual_salt_flux_units() dsvar = ds.variables dxt = dsvar['DXT'][:] * 1e-2 dyt = dsvar['DYT'][:] * 1e-2 dz = dsvar['dz'][:] * 1e-2 if kzo is None: kzo = len(dz) fluxbudget = 0. for k in range(kza,kzo): # get bolus velocity uflux = _fill0(dsvar['UISOP'][t,k]) * 1e-2 # m s-1 vflux = _fill0(dsvar['VISOP'][t,k]) * 1e-2 # m s-1 # get scalar data if varn == 'heat': scalar = _fill0(dsvar['TEMP'][t,k]) elif varn == 'salt': scalar = _fill0(dsvar['SALT'][t,k]) elif varn == 'freshwater': scalar = (S0 - _fill0(dsvar['SALT'][t,k])) / S0 # multiply flux by scalar uflux *= scalar vflux *= scalar # multiply by horizontal grid spacing uflux *= dyt vflux *= dxt # multiply by vertical grid spacing uflux *= dz[k] vflux *= dz[k] # compute budget fluxbudget += budget_over_region_2D(uflux,vflux,scalar=None,mask=mask) if varn == 'heat': fluxbudget *= (1e3 * 4e3 * 1e-15) # PW return fluxbudget fluxbudget_bolus = fluxbudget_bolus_visop def fluxbudget_diffusion(ds,mask,varn,kza=0,kzo=None,S0=34.8,t=0): """Compute flux of `varn` into region `mask` due to diffusion""" _warn_virtual_salt_flux_units() dsvar = ds.variables dxt = dsvar['DXT'][:] * 1e-2 dyt = dsvar['DYT'][:] * 1e-2 dz = dsvar['dz'][:] * 1e-2 if kzo is None: kzo = len(dz) fluxbudget = 0. 
for k in range(kza,kzo): # get scalar data if varn == 'heat': scalar = _fill0(dsvar['TEMP'][t,k]) elif varn == 'salt': scalar = _fill0(dsvar['SALT'][t,k]) elif varn == 'freshwater': scalar = (S0 - _fill0(dsvar['SALT'][t,k])) / S0 # get gradient uflux = central_differences(scalar,dxt,axis=1) # [scalar] m-1 vflux = central_differences(scalar,dyt,axis=0) # [scalar] m-1 # multiply gradient by diffusion coefficient kappa = _fill0(dsvar['KAPPA_ISOP'][t,k] * 1e-4) # m2 s-1 uflux *= kappa vflux *= kappa # multiply by horizontal grid spacing uflux *= dyt vflux *= dxt # multiply by vertical grid spacing uflux *= dz[k] vflux *= dz[k] # compute budget fluxbudget += budget_over_region_2D(uflux,vflux,scalar=None,mask=mask) # convert to right units if varn == 'heat': fluxbudget *= (1e3 * 4e3 * 1e-15) # PW return fluxbudget def fluxbudget_bolus_advection_tendency(ds,mask,varn,t=0): _warn_virtual_salt_flux_units() dsvar = ds.variables if varn == 'heat': integrand = _fill0(dsvar['ADVT_ISOP'][t][mask]) * 1e-2 elif varn == 'salt': integrand = _fill0(dsvar['ADVS_ISOP'][t][mask]) * 1e-2 else: raise ValueError('This function only works for heat and salt transport.') integrand *= dsvar['TAREA'][:][mask] * 1e-4 integral = np.sum(integrand) return integral def transport_divergence(ds,mask,varn='salt',kza=0,kzo=None,t=0): _warn_virtual_salt_flux_units() if varn == 'heat': uvar,vvar = 'UET','VNT' elif varn == 'salt': uvar,vvar = 'UES','VNS' dsvar = ds.variables dxu = dsvar['DXU'][:] * 1e-2 dyu = dsvar['DYU'][:] * 1e-2 tarea = dsvar['TAREA'][:] * 1e-4 dz = dsvar['dz'][:] * 1e-2 if kzo is None: kzo = len(dz) transport_divergence = 0. for k in range(kza,kzo): uflux = _fill0(dsvar[uvar][t,k]) uflux *= dyu uflux *= dz[k] uflux *= mask vflux = _fill0(dsvar[vvar][t,k]) vflux *= dxu vflux *= dz[k] vflux *= mask divergence = central_differences(uflux,dxu,axis=1) + central_differences(vflux,dyu,axis=0) divergence *= mask transport_divergence += np.sum(divergence*tarea) if varn=='heat': warnings.warn('Units might be wrong for heat transport! Check!') return transport_divergence def transport_divergence_from_vertical(ds,mask,varn='salt',kza=0,kzo=None,t=0): _warn_virtual_salt_flux_units() if varn == 'heat': wvar = 'WTT' elif varn == 'salt': wvar = 'WTS' dsvar = ds.variables dz = dsvar['dz'][:] * 1e-2 if kzo is None: kzo = len(dz) transport_divergence = 0. for k in range(kza,kzo): wflux = _fill0(dsvar[wvar][t,k][mask]) wflux *= dz[k] wflux *= dsvar['TAREA'][:][mask] * 1e-4 transport_divergence += np.sum(wflux) return transport_divergence
gpl-2.0
-7,507,726,245,566,721,000
32.41
98
0.564801
false
tkem/mopidy-podcast-itunes
mopidy_podcast_itunes/__init__.py
1
4677
import pathlib import pkg_resources from mopidy import config, ext, httpclient __version__ = pkg_resources.get_distribution("Mopidy-Podcast").version CHARTS = ["podcasts", "audioPodcasts", "videoPodcasts"] COUNTRIES = [ "AD", "AE", "AF", "AG", "AI", "AL", "AM", "AO", "AQ", "AR", "AS", "AT", "AU", "AW", "AX", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ", "BL", "BM", "BN", "BO", "BQ", "BR", "BS", "BT", "BV", "BW", "BY", "BZ", "CA", "CC", "CD", "CF", "CG", "CH", "CI", "CK", "CL", "CM", "CN", "CO", "CR", "CU", "CV", "CW", "CX", "CY", "CZ", "DE", "DJ", "DK", "DM", "DO", "DZ", "EC", "EE", "EG", "EH", "ER", "ES", "ET", "FI", "FJ", "FK", "FM", "FO", "FR", "GA", "GB", "GD", "GE", "GF", "GG", "GH", "GI", "GL", "GM", "GN", "GP", "GQ", "GR", "GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN", "HR", "HT", "HU", "ID", "IE", "IL", "IM", "IN", "IO", "IQ", "IR", "IS", "IT", "JE", "JM", "JO", "JP", "KE", "KG", "KH", "KI", "KM", "KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB", "LC", "LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD", "ME", "MF", "MG", "MH", "MK", "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT", "MU", "MV", "MW", "MX", "MY", "MZ", "NA", "NC", "NE", "NF", "NG", "NI", "NL", "NO", "NP", "NR", "NU", "NZ", "OM", "PA", "PE", "PF", "PG", "PH", "PK", "PL", "PM", "PN", "PR", "PS", "PT", "PW", "PY", "QA", "RE", "RO", "RS", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SH", "SI", "SJ", "SK", "SL", "SM", "SN", "SO", "SR", "SS", "ST", "SV", "SX", "SY", "SZ", "TC", "TD", "TF", "TG", "TH", "TJ", "TK", "TL", "TM", "TN", "TO", "TR", "TT", "TV", "TW", "TZ", "UA", "UG", "UM", "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN", "VU", "WF", "WS", "YE", "YT", "ZA", "ZM", "ZW", ] EXPLICIT = ("Yes", "No") # since config.Boolean has no "optional" MAX_LIMIT = 200 # absolute limit specified by iTunes Store API class Extension(ext.Extension): dist_name = "Mopidy-Podcast-iTunes" ext_name = "podcast-itunes" version = __version__ def get_default_config(self): return config.read(pathlib.Path(__file__).parent / "ext.conf") def get_config_schema(self): schema = super().get_config_schema() schema.update( base_url=config.String(), country=config.String(choices=COUNTRIES), explicit=config.String(choices=EXPLICIT, optional=True), charts=config.String(choices=CHARTS), charts_limit=config.Integer( minimum=1, maximum=MAX_LIMIT, optional=True ), search_limit=config.Integer( minimum=1, maximum=MAX_LIMIT, optional=True ), timeout=config.Integer(minimum=1, optional=True), retries=config.Integer(minimum=0), # no longer used charts_format=config.Deprecated(), episode_format=config.Deprecated(), genre_format=config.Deprecated(), podcast_format=config.Deprecated(), root_genre_id=config.Deprecated(), root_name=config.Deprecated(), ) return schema def setup(self, registry): from .backend import iTunesPodcastBackend registry.add("backend", iTunesPodcastBackend) @classmethod def get_requests_session(cls, config): import requests proxy = httpclient.format_proxy(config["proxy"]) user_agent_string = f"{cls.dist_name}/{cls.version}" user_agent = httpclient.format_user_agent(user_agent_string) session = requests.Session() session.proxies.update({"http": proxy, "https": proxy}) session.headers.update({"user-agent": user_agent}) return session
apache-2.0
-5,865,417,700,428,691,000
13.707547
70
0.39876
false
FlightGear/flightgear
utils/Modeller/yasim_import.py
1
30696
#!BPY # """ # Name: 'YASim (.xml)' # Blender: 245 # Group: 'Import' # Tooltip: 'Loads and visualizes a YASim FDM geometry' # """ __author__ = "Melchior FRANZ < mfranz # aon : at >" __url__ = ["http://www.flightgear.org/", "http://cvs.flightgear.org/viewvc/source/utils/Modeller/yasim_import.py"] __version__ = "0.2" __bpydoc__ = """\ yasim_import.py loads and visualizes a YASim FDM geometry ========================================================= It is recommended to load the model superimposed over a greyed out and immutable copy of the aircraft model: (0) put this script into ~/.blender/scripts/ (1) load or import aircraft model (menu -> "File" -> "Import" -> "AC3D (.ac) ...") (2) create new *empty* scene (menu -> arrow button left of "SCE:scene1" combobox -> "ADD NEW" -> "empty") (3) rename scene to yasim (not required) (4) link to scene1 (F10 -> "Output" tab in "Buttons Window" -> arrow button left of text entry "No Set Scene" -> "scene1") (5) now load the YASim config file (menu -> "File" -> "Import" -> "YASim (.xml) ...") This is good enough for simple checks. But if you are working on the YASim configuration, then you need a quick and convenient way to reload the file. In that case continue after (4): (5) switch the button area at the bottom of the blender screen to "Scripts Window" mode (green python snake icon) (6) load the YASim config file (menu -> "Scripts" -> "Import" -> "YASim (.xml) ...") (7) make the "Scripts Window" area as small as possible by dragging the area separator down (8) optionally split the "3D View" area and switch the right part to the "Outliner" (9) press the "Reload YASim" button in the script area to reload the file If the 3D model is displaced with respect to the FDM model, then the <offsets> values from the model animation XML file should be added as comment to the YASim config file, as a line all by itself, with no spaces surrounding the equal signs. Spaces elsewhere are allowed. For example: <offsets> <x-m>3.45</x-m> <z-m>-0.4</z-m> <pitch-deg>5</pitch-deg> </offsets> becomes: <!-- offsets: x=3.45 z=-0.4 p=5 --> Possible variables are: x ... <x-m> y ... <y-m> z ... <z-m> h ... <heading-deg> p ... <pitch-deg> r ... <roll-deg> Of course, absolute FDM coordinates can then no longer directly be read from Blender's 3D view. The cursor coordinates display in the script area, however, shows the coordinates in YASim space. Note that object names don't contain XML indices but element numbers. YASim_flap0#2 is the third flap0 in the whole file, not necessarily in its parent XML group. A floating point part in the object name (e.g. YASim_flap0#2.004) only means that the geometry has been reloaded that often. It's an unavoidable consequence of how Blender deals with meshes. 
Elements are displayed as follows: cockpit -> monkey head fuselage -> blue "tube" (with only 12 sides for less clutter); center at "a" vstab -> red with yellow control surfaces (flap0, flap1, slat, spoiler) wing/mstab/hstab -> green with yellow control surfaces (which are always 20 cm deep); symmetric surfaces are only displayed on the left side, unless the "Mirror" button is active thrusters (jet/propeller/thruster) -> dashed line from center to actionpt; arrow from actionpt along thrust vector (always 1 m long); propeller circle rotor -> radius and rel_len_blade_start circle, normal and forward vector, one blade at phi0 with direction arrow near blade tip gear -> contact point and compression vector (no arrow head) tank -> magenta cube (10 cm side length) weight -> inverted cyan cone ballast -> yellow cylinder hitch -> hexagon (10 cm diameter) hook -> dashed line for up angle, T-line for down angle launchbar -> dashed line for up angles, T-line for down angles (launchbar and holdback each) The Mirror button complements symmetrical surfaces (wing/hstab/mstab) and control surfaces (flap0/flap1/slat/spoiler). This is useful for asymmetrical aircraft, but has the disadvantage that it moves the surfaces' object centers from their usual place, yasim's [x, y, z] value, to [0, 0, 0]. Turning mirroring off restores the object center. Environment variable BLENDER_YASIM_IMPORT can be set to a space-separated list of options: $ BLENDER_YASIM_IMPORT="mirror verbose" blender whereby: verbose ... enables verbose logs mirror ... enables mirroring of symmetric surfaces """ #-------------------------------------------------------------------------------- # Copyright (C) 2009 Melchior FRANZ < mfranz # aon : at > # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
#-------------------------------------------------------------------------------- import Blender, BPyMessages, string, math, os from Blender.Mathutils import * from xml.sax import handler, make_parser CONFIG = string.split(os.getenv("BLENDER_YASIM_IMPORT") or "") YASIM_MATRIX = Matrix([-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]) ORIGIN = Vector(0, 0, 0) X = Vector(1, 0, 0) Y = Vector(0, 1, 0) Z = Vector(0, 0, 1) DEG2RAD = math.pi / 180 RAD2DEG = 180 / math.pi NO_EVENT = 0 RELOAD_BUTTON = 1 CURSOR_BUTTON = 2 MIRROR_BUTTON = 3 class Global: verbose = "verbose" in CONFIG path = "" matrix = None data = None cursor = ORIGIN last_cursor = Vector(Blender.Window.GetCursorPos()) mirror_button = Blender.Draw.Create("mirror" in CONFIG) class Abort(Exception): def __init__(self, msg, term = None): self.msg = msg self.term = term def log(msg): if Global.verbose: print(msg) def draw_dashed_line(mesh, start, end): w = 0.04 step = w * (end - start).normalize() n = len(mesh.verts) for i in range(int(1 + 0.5 * (end - start).length / w)): a = start + 2 * i * step b = a + step if (b - end).length < step.length: b = end mesh.verts.extend([a, b]) mesh.edges.extend([n + 2 * i, n + 2 * i + 1]) def draw_arrow(mesh, start, end): v = end - start m = v.toTrackQuat('x', 'z').toMatrix().resize4x4() * TranslationMatrix(start) v = v.length * X n = len(mesh.verts) mesh.verts.extend([ORIGIN * m , v * m, (v - 0.05 * X + 0.05 * Y) * m, (v - 0.05 * X - 0.05 * Y) * m]) # head mesh.verts.extend([(ORIGIN + 0.05 * Y) * m, (ORIGIN - 0.05 * Y) * m]) # base mesh.edges.extend([[n, n + 1], [n + 1, n + 2], [n + 1, n + 3], [n + 4, n + 5]]) def draw_circle(mesh, numpoints, radius, matrix): n = len(mesh.verts) for i in range(numpoints): angle = 2.0 * math.pi * i / numpoints v = Vector(radius * math.cos(angle), radius * math.sin(angle), 0) mesh.verts.extend([v * matrix]) for i in range(numpoints): i1 = (i + 1) % numpoints mesh.edges.extend([[n + i, n + i1]]) class Item: scene = Blender.Scene.GetCurrent() def make_twosided(self, mesh): mesh.faceUV = True for f in mesh.faces: f.mode |= Blender.Mesh.FaceModes.TWOSIDE | Blender.Mesh.FaceModes.OBCOL def set_color(self, obj, color): mat = Blender.Material.New() mat.setRGBCol(color[0], color[1], color[2]) mat.setAlpha(color[3]) mat.mode |= Blender.Material.Modes.ZTRANSP | Blender.Material.Modes.TRANSPSHADOW obj.transp = True mesh = obj.getData(mesh = True) mesh.materials += [mat] for f in mesh.faces: f.smooth = True mesh.calcNormals() class Cockpit(Item): def __init__(self, center): mesh = Blender.Mesh.Primitives.Monkey() mesh.transform(ScaleMatrix(0.13, 4) * Euler(90, 0, 90).toMatrix().resize4x4() * TranslationMatrix(Vector(-0.1, 0, -0.032))) obj = self.scene.objects.new(mesh, "YASim_cockpit") obj.setMatrix(TranslationMatrix(center) * Global.matrix) class Tank(Item): def __init__(self, name, center): mesh = Blender.Mesh.Primitives.Cube() mesh.transform(ScaleMatrix(0.05, 4)) obj = self.scene.objects.new(mesh, name) obj.setMatrix(TranslationMatrix(center) * Global.matrix) self.set_color(obj, [1, 0, 1, 0.5]) class Ballast(Item): def __init__(self, name, center): mesh = Blender.Mesh.Primitives.Cylinder() mesh.transform(ScaleMatrix(0.05, 4)) obj = self.scene.objects.new(mesh, name) obj.setMatrix(TranslationMatrix(center) * Global.matrix) self.set_color(obj, [1, 1, 0, 0.5]) class Weight(Item): def __init__(self, name, center): mesh = Blender.Mesh.Primitives.Cone() mesh.transform(ScaleMatrix(0.05, 4)) obj = self.scene.objects.new(mesh, name) obj.setMatrix(TranslationMatrix(center) * 
Global.matrix) self.set_color(obj, [0, 1, 1, 0.5]) class Gear(Item): def __init__(self, name, center, compression): mesh = Blender.Mesh.New() mesh.verts.extend([ORIGIN, compression]) mesh.edges.extend([0, 1]) obj = self.scene.objects.new(mesh, name) obj.setMatrix(TranslationMatrix(center) * Global.matrix) class Hook(Item): def __init__(self, name, center, length, up_angle, dn_angle): mesh = Blender.Mesh.New() up = ORIGIN - length * math.cos(up_angle * DEG2RAD) * X - length * math.sin(up_angle * DEG2RAD) * Z dn = ORIGIN - length * math.cos(dn_angle * DEG2RAD) * X - length * math.sin(dn_angle * DEG2RAD) * Z mesh.verts.extend([ORIGIN, dn, dn + 0.05 * Y, dn - 0.05 * Y]) mesh.edges.extend([[0, 1], [2, 3]]) draw_dashed_line(mesh, ORIGIN, up) draw_dashed_line(mesh, ORIGIN, dn) obj = self.scene.objects.new(mesh, name) obj.setMatrix(TranslationMatrix(center) * Global.matrix) class Launchbar(Item): def __init__(self, name, lb, lb_length, hb, hb_length, up_angle, dn_angle): mesh = Blender.Mesh.New() hb = hb - lb lb_tip = ORIGIN + lb_length * math.cos(dn_angle * DEG2RAD) * X - lb_length * math.sin(dn_angle * DEG2RAD) * Z hb_tip = hb - hb_length * math.cos(dn_angle * DEG2RAD) * X - hb_length * math.sin(dn_angle * DEG2RAD) * Z mesh.verts.extend([lb_tip, ORIGIN, hb, hb_tip, lb_tip + 0.05 * Y, lb_tip - 0.05 * Y, hb_tip + 0.05 * Y, hb_tip - 0.05 * Y]) mesh.edges.extend([[0, 1], [1, 2], [2, 3], [4, 5], [6, 7]]) draw_dashed_line(mesh, ORIGIN, lb_length * math.cos(up_angle * DEG2RAD) * X - lb_length * math.sin(up_angle * DEG2RAD) * Z) draw_dashed_line(mesh, hb, hb - hb_length * math.cos(up_angle * DEG2RAD) * X - hb_length * math.sin(up_angle * DEG2RAD) * Z) obj = self.scene.objects.new(mesh, name) obj.setMatrix(TranslationMatrix(lb) * Global.matrix) class Hitch(Item): def __init__(self, name, center): mesh = Blender.Mesh.Primitives.Circle(6, 0.1) obj = self.scene.objects.new(mesh, name) obj.setMatrix(RotationMatrix(90, 4, "x") * TranslationMatrix(center) * Global.matrix) class Thrust: def set_actionpt(self, p): self.actionpt = p def set_dir(self, d): self.thrustvector = d class Thruster(Thrust, Item): def __init__(self, name, center, thrustvector): (self.name, self.center, self.actionpt, self.thrustvector) = (name, center, center, thrustvector) def __del__(self): a = self.actionpt - self.center mesh = Blender.Mesh.New() draw_dashed_line(mesh, ORIGIN, a) draw_arrow(mesh, a, a + self.thrustvector.normalize()) obj = self.scene.objects.new(mesh, self.name) obj.setMatrix(TranslationMatrix(self.center) * Global.matrix) class Propeller(Thrust, Item): def __init__(self, name, center, radius): (self.name, self.center, self.radius, self.actionpt, self.thrustvector) = (name, center, radius, center, -X) def __del__(self): a = self.actionpt - self.center matrix = self.thrustvector.toTrackQuat('z', 'x').toMatrix().resize4x4() * TranslationMatrix(a) mesh = Blender.Mesh.New() mesh.verts.extend([ORIGIN * matrix, (ORIGIN + self.radius * X) * matrix]) mesh.edges.extend([[0, 1]]) draw_dashed_line(mesh, ORIGIN, a) draw_arrow(mesh, a, a + self.thrustvector.normalize()) draw_circle(mesh, 128, self.radius, matrix) obj = self.scene.objects.new(mesh, self.name) obj.setMatrix(TranslationMatrix(self.center) * Global.matrix) class Jet(Thrust, Item): def __init__(self, name, center, rotate): (self.name, self.center, self.actionpt) = (name, center, center) self.thrustvector = -X * RotationMatrix(rotate, 4, "y") def __del__(self): a = self.actionpt - self.center mesh = Blender.Mesh.New() draw_dashed_line(mesh, ORIGIN, a) draw_arrow(mesh, a, 
a + self.thrustvector.normalize()) obj = self.scene.objects.new(mesh, self.name) obj.setMatrix(TranslationMatrix(self.center) * Global.matrix) class Fuselage(Item): def __init__(self, name, a, b, width, taper, midpoint): numvert = 12 angle = [] for i in range(numvert): alpha = i * 2 * math.pi / float(numvert) angle.append([math.cos(alpha), math.sin(alpha)]) axis = b - a length = axis.length mesh = Blender.Mesh.New() for i in range(numvert): mesh.verts.extend([[0, 0.5 * width * taper * angle[i][0], 0.5 * width * taper * angle[i][1]]]) for i in range(numvert): mesh.verts.extend([[midpoint * length, 0.5 * width * angle[i][0], 0.5 * width * angle[i][1]]]) for i in range(numvert): mesh.verts.extend([[length, 0.5 * width * taper * angle[i][0], 0.5 * width * taper * angle[i][1]]]) for i in range(numvert): i1 = (i + 1) % numvert mesh.faces.extend([[i, i1, i1 + numvert, i + numvert]]) mesh.faces.extend([[i + numvert, i1 + numvert, i1 + 2 * numvert, i + 2 * numvert]]) mesh.verts.extend([ORIGIN, length * X]) obj = self.scene.objects.new(mesh, name) obj.setMatrix(axis.toTrackQuat('x', 'y').toMatrix().resize4x4() * TranslationMatrix(a) * Global.matrix) self.set_color(obj, [0, 0, 0.5, 0.4]) class Rotor(Item): def __init__(self, name, center, up, fwd, numblades, radius, chord, twist, taper, rel_len_blade_start, phi0, ccw): matrix = RotationMatrix(phi0, 4, "z") * up.toTrackQuat('z', 'x').toMatrix().resize4x4() invert = matrix.copy().invert() direction = [-1, 1][ccw] twist *= DEG2RAD a = ORIGIN + rel_len_blade_start * radius * X b = ORIGIN + radius * X tw = 0.5 * chord * taper * math.cos(twist) * Y + 0.5 * direction * chord * taper * math.sin(twist) * Z mesh = Blender.Mesh.New() mesh.verts.extend([ORIGIN, a, b, a + 0.5 * chord * Y, a - 0.5 * chord * Y, b + tw, b - tw]) mesh.edges.extend([[0, 1], [1, 2], [1, 3], [1, 4], [3, 5], [4, 6], [5, 6]]) draw_circle(mesh, 64, rel_len_blade_start * radius, Matrix()) draw_circle(mesh, 128, radius, Matrix()) draw_arrow(mesh, ORIGIN, up * invert) draw_arrow(mesh, ORIGIN, fwd * invert) b += 0.1 * X + direction * chord * Y draw_arrow(mesh, b, b + min(0.5 * radius, 1) * direction * Y) obj = self.scene.objects.new(mesh, name) obj.setMatrix(matrix * TranslationMatrix(center) * Global.matrix) class Wing(Item): def __init__(self, name, root, length, chord, incidence, twist, taper, sweep, dihedral): # <1--0--2 # \ | / # 4-3-5 self.is_symmetric = not name.startswith("YASim_vstab#") mesh = Blender.Mesh.New() mesh.verts.extend([ORIGIN, ORIGIN + 0.5 * chord * X, ORIGIN - 0.5 * chord * X]) tip = ORIGIN + math.cos(sweep * DEG2RAD) * length * Y - math.sin(sweep * DEG2RAD) * length * X tipfore = tip + 0.5 * taper * chord * math.cos(twist * DEG2RAD) * X + 0.5 * taper * chord * math.sin(twist * DEG2RAD) * Z tipaft = tip + tip - tipfore mesh.verts.extend([tip, tipfore, tipaft]) mesh.faces.extend([[0, 1, 4, 3], [2, 0, 3, 5]]) self.make_twosided(mesh) obj = self.scene.objects.new(mesh, name) mesh.transform(Euler(dihedral, -incidence, 0).toMatrix().resize4x4()) self.set_color(obj, [[0.5, 0.0, 0, 0.5], [0.0, 0.5, 0, 0.5]][self.is_symmetric]) (self.obj, self.mesh) = (obj, mesh) if self.is_symmetric and Global.mirror_button.val: mod = obj.modifiers.append(Blender.Modifier.Type.MIRROR) mod[Blender.Modifier.Settings.AXIS_X] = False mod[Blender.Modifier.Settings.AXIS_Y] = True mod[Blender.Modifier.Settings.AXIS_Z] = False mesh.transform(TranslationMatrix(root)) # must move object center to x axis obj.setMatrix(Global.matrix) else: obj.setMatrix(TranslationMatrix(root) * Global.matrix) def 
add_flap(self, name, start, end): a = Vector(self.mesh.verts[2].co) b = Vector(self.mesh.verts[5].co) c = 0.2 * (Vector(self.mesh.verts[0].co - a)).normalize() m = self.obj.getMatrix() mesh = Blender.Mesh.New() i0 = a + start * (b - a) i1 = a + end * (b - a) mesh.verts.extend([i0, i1, i0 + c, i1 + c]) mesh.faces.extend([[0, 1, 3, 2]]) self.make_twosided(mesh) obj = self.scene.objects.new(mesh, name) obj.setMatrix(m) self.set_color(obj, [0.8, 0.8, 0, 0.9]) if self.is_symmetric and Global.mirror_button.val: mod = obj.modifiers.append(Blender.Modifier.Type.MIRROR) mod[Blender.Modifier.Settings.AXIS_X] = False mod[Blender.Modifier.Settings.AXIS_Y] = True mod[Blender.Modifier.Settings.AXIS_Z] = False class import_yasim(handler.ErrorHandler, handler.ContentHandler): ignored = ["cruise", "approach", "control-input", "control-output", "control-speed", \ "control-setting", "stall", "airplane", "piston-engine", "turbine-engine", \ "rotorgear", "tow", "winch", "solve-weight"] # err_handler def warning(self, exception): print((self.error_string("Warning", exception))) def error(self, exception): print((self.error_string("Error", exception))) def fatalError(self, exception): raise Abort(str(exception), self.error_string("Fatal", exception)) def error_string(self, tag, e): (column, line) = (e.getColumnNumber(), e.getLineNumber()) return "%s: %s\n%s%s^" % (tag, str(e), Global.data[line - 1], column * ' ') # doc_handler def setDocumentLocator(self, locator): self.locator = locator def startDocument(self): self.tags = [] self.counter = {} self.items = [None] def endDocument(self): for o in Item.scene.objects: o.sel = True def startElement(self, tag, attrs): if len(self.tags) == 0 and tag != "airplane": raise Abort("this isn't a YASim config file (bad root tag at line %d)" % self.locator.getLineNumber()) self.tags.append(tag) path = string.join(self.tags, '/') item = Item() parent = self.items[-1] if self.counter.has_key(tag): self.counter[tag] += 1 else: self.counter[tag] = 0 if tag == "cockpit": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) log("\033[31mcockpit x=%f y=%f z=%f\033[m" % (c[0], c[1], c[2])) item = Cockpit(c) elif tag == "fuselage": a = Vector(float(attrs["ax"]), float(attrs["ay"]), float(attrs["az"])) b = Vector(float(attrs["bx"]), float(attrs["by"]), float(attrs["bz"])) width = float(attrs["width"]) taper = float(attrs.get("taper", 1)) midpoint = float(attrs.get("midpoint", 0.5)) log("\033[32mfuselage ax=%f ay=%f az=%f bx=%f by=%f bz=%f width=%f taper=%f midpoint=%f\033[m" % \ (a[0], a[1], a[2], b[0], b[1], b[2], width, taper, midpoint)) item = Fuselage("YASim_%s#%d" % (tag, self.counter[tag]), a, b, width, taper, midpoint) elif tag == "gear": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) compression = float(attrs.get("compression", 1)) up = Z * compression if attrs.has_key("upx"): up = Vector(float(attrs["upx"]), float(attrs["upy"]), float(attrs["upz"])).normalize() * compression log("\033[35;1mgear x=%f y=%f z=%f compression=%f upx=%f upy=%f upz=%f\033[m" \ % (c[0], c[1], c[2], compression, up[0], up[1], up[2])) item = Gear("YASim_gear#%d" % self.counter[tag], c, up) elif tag == "jet": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) rotate = float(attrs.get("rotate", 0)) log("\033[36;1mjet x=%f y=%f z=%f rotate=%f\033[m" % (c[0], c[1], c[2], rotate)) item = Jet("YASim_jet#%d" % self.counter[tag], c, rotate) elif tag == "propeller": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) radius = 
float(attrs["radius"]) log("\033[36;1m%s x=%f y=%f z=%f radius=%f\033[m" % (tag, c[0], c[1], c[2], radius)) item = Propeller("YASim_propeller#%d" % self.counter[tag], c, radius) elif tag == "thruster": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) v = Vector(float(attrs["vx"]), float(attrs["vy"]), float(attrs["vz"])) log("\033[36;1m%s x=%f y=%f z=%f vx=%f vy=%f vz=%f\033[m" % (tag, c[0], c[1], c[2], v[0], v[1], v[2])) item = Thruster("YASim_thruster#%d" % self.counter[tag], c, v) elif tag == "actionpt": if not isinstance(parent, Thrust): raise Abort("%s is not part of a thruster/propeller/jet at line %d" \ % (path, self.locator.getLineNumber())) c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) log("\t\033[36mactionpt x=%f y=%f z=%f\033[m" % (c[0], c[1], c[2])) parent.set_actionpt(c) elif tag == "dir": if not isinstance(parent, Thrust): raise Abort("%s is not part of a thruster/propeller/jet at line %d" \ % (path, self.locator.getLineNumber())) c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) log("\t\033[36mdir x=%f y=%f z=%f\033[m" % (c[0], c[1], c[2])) parent.set_dir(c) elif tag == "tank": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) log("\033[34;1m%s x=%f y=%f z=%f\033[m" % (tag, c[0], c[1], c[2])) item = Tank("YASim_tank#%d" % self.counter[tag], c) elif tag == "ballast": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) log("\033[34m%s x=%f y=%f z=%f\033[m" % (tag, c[0], c[1], c[2])) item = Ballast("YASim_ballast#%d" % self.counter[tag], c) elif tag == "weight": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) log("\033[34m%s x=%f y=%f z=%f\033[m" % (tag, c[0], c[1], c[2])) item = Weight("YASim_weight#%d" % self.counter[tag], c) elif tag == "hook": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) length = float(attrs.get("length", 1)) up_angle = float(attrs.get("up-angle", 0)) down_angle = float(attrs.get("down-angle", 70)) log("\033[35m%s x=%f y=%f z=%f length=%f up-angle=%f down-angle=%f\033[m" \ % (tag, c[0], c[1], c[2], length, up_angle, down_angle)) item = Hook("YASim_hook#%d" % self.counter[tag], c, length, up_angle, down_angle) elif tag == "hitch": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) log("\033[35m%s x=%f y=%f z=%f\033[m" % (tag, c[0], c[1], c[2])) item = Hitch("YASim_hitch#%d" % self.counter[tag], c) elif tag == "launchbar": c = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) length = float(attrs.get("length", 1)) up_angle = float(attrs.get("up-angle", -45)) down_angle = float(attrs.get("down-angle", 45)) holdback = Vector(float(attrs.get("holdback-x", c[0])), float(attrs.get("holdback-y", c[1])), float(attrs.get("holdback-z", c[2]))) holdback_length = float(attrs.get("holdback-length", 2)) log("\033[35m%s x=%f y=%f z=%f length=%f down-angle=%f up-angle=%f holdback-x=%f holdback-y=%f holdback-z+%f holdback-length=%f\033[m" \ % (tag, c[0], c[1], c[2], length, down_angle, up_angle, \ holdback[0], holdback[1], holdback[2], holdback_length)) item = Launchbar("YASim_launchbar#%d" % self.counter[tag], c, length, holdback, holdback_length, up_angle, down_angle) elif tag == "wing" or tag == "hstab" or tag == "vstab" or tag == "mstab": root = Vector(float(attrs["x"]), float(attrs["y"]), float(attrs["z"])) length = float(attrs["length"]) chord = float(attrs["chord"]) incidence = float(attrs.get("incidence", 0)) twist = float(attrs.get("twist", 0)) taper = float(attrs.get("taper", 1)) sweep = 
float(attrs.get("sweep", 0)) dihedral = float(attrs.get("dihedral", [0, 90][tag == "vstab"])) log("\033[33;1m%s x=%f y=%f z=%f length=%f chord=%f incidence=%f twist=%f taper=%f sweep=%f dihedral=%f\033[m" \ % (tag, root[0], root[1], root[2], length, chord, incidence, twist, taper, sweep, dihedral)) item = Wing("YASim_%s#%d" % (tag, self.counter[tag]), root, length, chord, incidence, twist, taper, sweep, dihedral) elif tag == "flap0" or tag == "flap1" or tag == "slat" or tag == "spoiler": if not isinstance(parent, Wing): raise Abort("%s is not part of a wing or stab at line %d" \ % (path, self.locator.getLineNumber())) start = float(attrs["start"]) end = float(attrs["end"]) log("\t\033[33m%s start=%f end=%f\033[m" % (tag, start, end)) parent.add_flap("YASim_%s#%d" % (tag, self.counter[tag]), start, end) elif tag == "rotor": c = Vector(float(attrs.get("x", 0)), float(attrs.get("y", 0)), float(attrs.get("z", 0))) norm = Vector(float(attrs.get("nx", 0)), float(attrs.get("ny", 0)), float(attrs.get("nz", 1))) fwd = Vector(float(attrs.get("fx", 1)), float(attrs.get("fy", 0)), float(attrs.get("fz", 0))) diameter = float(attrs.get("diameter", 10.2)) numblades = int(attrs.get("numblades", 4)) chord = float(attrs.get("chord", 0.3)) twist = float(attrs.get("twist", 0)) taper = float(attrs.get("taper", 1)) rel_len_blade_start = float(attrs.get("rel-len-blade-start", 0)) phi0 = float(attrs.get("phi0", 0)) ccw = not not int(attrs.get("ccw", 0)) log(("\033[36;1mrotor x=%f y=%f z=%f nx=%f ny=%f nz=%f fx=%f fy=%f fz=%f numblades=%d diameter=%f " \ + "chord=%f twist=%f taper=%f rel_len_blade_start=%f phi0=%f ccw=%d\033[m") \ % (c[0], c[1], c[2], norm[0], norm[1], norm[2], fwd[0], fwd[1], fwd[2], numblades, \ diameter, chord, twist, taper, rel_len_blade_start, phi0, ccw)) item = Rotor("YASim_rotor#%d" % self.counter[tag], c, norm, fwd, numblades, 0.5 * diameter, chord, \ twist, taper, rel_len_blade_start, phi0, ccw) elif tag not in self.ignored: log("\033[30;1m%s\033[m" % path) self.items.append(item) def endElement(self, tag): self.tags.pop() self.items.pop() def extract_matrix(filedata, tag): v = { 'x': 0.0, 'y': 0.0, 'z': 0.0, 'h': 0.0, 'p': 0.0, 'r': 0.0 } has_offsets = False for line in filedata: line = string.strip(line) if not line.startswith("<!--") or not line.endswith("-->"): continue line = string.strip(line[4:-3]) if not string.lower(line).startswith("%s:" % tag): continue line = string.strip(line[len(tag) + 1:]) for assignment in string.split(line): (key, value) = string.split(assignment, '=', 2) v[string.strip(key)] = float(string.strip(value)) has_offsets = True if not has_offsets: return None print(("using offsets: x=%f y=%f z=%f h=%f p=%f r=%f" % (v['x'], v['y'], v['z'], v['h'], v['p'], v['r']))) return Euler(v['r'], v['p'], v['h']).toMatrix().resize4x4() * TranslationMatrix(Vector(v['x'], v['y'], v['z'])) def load_yasim_config(path): if BPyMessages.Error_NoFile(path): return Blender.Window.WaitCursor(1) Blender.Window.EditMode(0) print(("loading '%s'" % path)) try: for o in Item.scene.objects: if o.name.startswith("YASim_"): Item.scene.objects.unlink(o) try: f = open(path) Global.data = f.readlines() finally: f.close() Global.path = path Global.matrix = YASIM_MATRIX matrix = extract_matrix(Global.data, "offsets") if matrix: Global.matrix *= matrix.invert() Global.yasim.parse(path) Blender.Registry.SetKey("FGYASimImportExport", { "path": path }, False) Global.data = None except Abort, e: print(("%s\nAborting ..." 
% (e.term or e.msg))) Blender.Draw.PupMenu("Error%t|" + e.msg) Blender.Window.RedrawAll() Blender.Window.WaitCursor(0) def gui_draw(): from Blender import BGL, Draw (width, height) = Blender.Window.GetAreaSize() BGL.glClearColor(0.4, 0.4, 0.45, 1) BGL.glClear(BGL.GL_COLOR_BUFFER_BIT) BGL.glColor3f(1, 1, 1) BGL.glRasterPos2f(5, 55) Draw.Text("FlightGear YASim Import: '%s'" % Global.path) Draw.PushButton("Reload", RELOAD_BUTTON, 5, 5, 80, 32, "reload YASim config file") Global.mirror_button = Draw.Toggle("Mirror", MIRROR_BUTTON, 100, 5, 50, 16, Global.mirror_button.val, \ "show symmetric surfaces on both sides (reloads config)") Draw.PushButton("Update Cursor", CURSOR_BUTTON, width - 650, 5, 100, 32, "update cursor display (in YASim coordinate system)") BGL.glRasterPos2f(width - 530 + Blender.Draw.GetStringWidth("Vector from last") - Blender.Draw.GetStringWidth("Current"), 24) Draw.Text("Current cursor pos: x = %+.3f y = %+.3f z = %+.3f" % tuple(Global.cursor)) c = Global.cursor - Global.last_cursor BGL.glRasterPos2f(width - 530, 7) Draw.Text("Vector from last cursor pos: x = %+.3f y = %+.3f z = %+.3f length = %.3f m" % (c[0], c[1], c[2], c.length)) def gui_event(ev, value): if ev == Blender.Draw.ESCKEY: Blender.Draw.Exit() def gui_button(n): if n == NO_EVENT: return elif n == RELOAD_BUTTON: load_yasim_config(Global.path) elif n == CURSOR_BUTTON: Global.last_cursor = Global.cursor Global.cursor = Vector(Blender.Window.GetCursorPos()) * Global.matrix.invert() d = Global.cursor - Global.last_cursor print(("cursor: x=\"%f\" y=\"%f\" z=\"%f\" dx=%f dy=%f dz=%f length=%f" \ % (Global.cursor[0], Global.cursor[1], Global.cursor[2], d[0], d[1], d[2], d.length))) elif n == MIRROR_BUTTON: load_yasim_config(Global.path) Blender.Draw.Redraw(1) def main(): log(6 * "\n") registry = Blender.Registry.GetKey("FGYASimImportExport", False) if registry and "path" in registry and Blender.sys.exists(Blender.sys.expandpath(registry["path"])): path = registry["path"] else: path = "" xml_handler = import_yasim() Global.yasim = make_parser() Global.yasim.setContentHandler(xml_handler) Global.yasim.setErrorHandler(xml_handler) if Blender.Window.GetScreenInfo(Blender.Window.Types.SCRIPT): Blender.Draw.Register(gui_draw, gui_event, gui_button) Blender.Window.FileSelector(load_yasim_config, "Import YASim Configuration File", path) main()
gpl-2.0
1,149,373,334,458,743,400
36.117291
139
0.627769
false
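
The YASim importer above reads its global offsets out of an XML comment of the form <!-- offsets: x=... y=... --> rather than a real element. Below is a minimal standalone sketch of that comment parsing, separated from the Blender/Euler matrix handling; the sample comment line is made up for illustration.

def parse_offsets(lines, tag="offsets"):
    """Return a dict of float offsets parsed from a '<!-- offsets: k=v ... -->' comment, or None."""
    values = {'x': 0.0, 'y': 0.0, 'z': 0.0, 'h': 0.0, 'p': 0.0, 'r': 0.0}
    found = False
    for line in lines:
        line = line.strip()
        if not (line.startswith("<!--") and line.endswith("-->")):
            continue
        body = line[4:-3].strip()
        if not body.lower().startswith(tag + ":"):
            continue
        for assignment in body[len(tag) + 1:].split():
            key, _, value = assignment.partition("=")
            values[key.strip()] = float(value)
        found = True
    return values if found else None

# Hypothetical usage with an illustrative comment line:
print(parse_offsets(["<!-- offsets: x=0.5 z=-1.2 p=3 -->"]))
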
sahat/bokeh
examples/plotting/server/remote_image.py
1
1468
import numpy as np from bokeh.plotting import * from bokeh.objects import Range1d, ServerDataSource """ In order to run this example, you have to execute ./bokeh-server -D remotedata the remote data directory in the bokeh checkout has the sample data for this example In addition, you must install ArrayManagement from this branch (soon to be master) https://github.com/ContinuumIO/ArrayManagement """ N = 1000 x = np.linspace(0, 10, N) y = np.linspace(0, 10, N) xx, yy = np.meshgrid(x, y) d = np.sin(xx)*np.cos(yy) output_server("remote_image") source = ServerDataSource(data_url="/defaultuser/array.table/array", owner_username="defaultuser", data={'x': [0], 'y': [0], 'global_x_range' : [0, 10], 'global_y_range' : [0, 10], 'global_offset_x' : [0], 'global_offset_y' : [0], 'dw' : [10], 'dh' : [10], 'palette': ["Spectral-11"] } ) image( source=source, image="image", x="x", y="y", dw="dw", dh="dh", width=200, height=200, palette="palette", x_range=Range1d(start=0, end=10), y_range=Range1d(start=0, end=10), tools="pan,wheel_zoom,box_zoom,reset,previewsave" ) show()
bsd-3-clause
-9,210,181,709,916,321,000
27.784314
84
0.50545
false
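
The remote image served by this example is just sin(xx)*cos(yy) on a 1000x1000 meshgrid. If the bokeh-server / ArrayManagement setup is not available, the same array can still be built and inspected locally; a small sketch follows (the .npy filename is illustrative, not what the server expects).

import numpy as np

N = 1000
x = np.linspace(0, 10, N)
y = np.linspace(0, 10, N)
xx, yy = np.meshgrid(x, y)
d = np.sin(xx) * np.cos(yy)   # same field the example serves remotely

print(d.shape)            # (1000, 1000)
print(d.min(), d.max())   # roughly -1.0 .. 1.0
np.save("array.npy", d)   # illustrative: persist it for whatever backend will serve it
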
kg-bot/SupyBot
plugins/Variables/test.py
1
3088
### # Copyright (c) 2011, Valentin Lorentz # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ### from supybot.test import * class VariablesTestCase(ChannelPluginTestCase): plugins = ('Variables',) def testSetGet(self): self.assertError('get foo') self.assertNotError('set foo bar') self.assertResponse('get foo', 'bar') self.assertNotError('set foo baz') self.assertResponse('get foo', 'baz') def testChannel(self): self.assertError('get --domain channel foo') self.assertError('get --domain channel --name #test foo') self.assertError('get --domain channel --name #egg foo') self.assertError('get foo') self.assertNotError('set --domain channel foo bar') self.assertResponse('get --domain channel foo', 'bar') self.assertResponse('get --domain channel --name #test foo', 'bar') self.assertError('get --domain channel --name #egg foo') self.assertError('get foo') def testNetwork(self): self.assertError('get --domain network foo') self.assertError('get --domain network --name test foo') self.assertError('get --domain network --name foonet foo') self.assertError('get foo') self.assertNotError('set --domain network foo bar') self.assertResponse('get --domain network foo', 'bar') self.assertResponse('get --domain network --name test foo', 'bar') self.assertError('get --domain network --name foonet foo') self.assertError('get foo') # vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
gpl-3.0
8,128,141,146,548,841,000
45.787879
79
0.710168
false
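
The tests above pin down the scoping contract: a variable written under a given domain and name is only readable under that same domain/name. A toy dictionary-backed sketch of that contract, not the plugin's actual storage backend:

class VariableStore(object):
    """Toy (domain, name) -> {key: value} store mirroring the tested semantics."""

    def __init__(self):
        self._data = {}

    def set(self, key, value, domain='global', name=None):
        self._data.setdefault((domain, name), {})[key] = value

    def get(self, key, domain='global', name=None):
        try:
            return self._data[(domain, name)][key]
        except KeyError:
            raise KeyError('no such variable: %s' % key)

store = VariableStore()
store.set('foo', 'bar', domain='channel', name='#test')
assert store.get('foo', domain='channel', name='#test') == 'bar'
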
utkarsh-goswami/erpnext
erpnext/accounts/doctype/pricing_rule/pricing_rule.py
1
13027
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt # For license information, please see license.txt from __future__ import unicode_literals import frappe import json import copy from frappe import throw, _ from frappe.utils import flt, cint from frappe.model.document import Document class MultiplePricingRuleConflict(frappe.ValidationError): pass class PricingRule(Document): def validate(self): self.validate_mandatory() self.validate_applicable_for_selling_or_buying() self.validate_min_max_qty() self.cleanup_fields_value() self.validate_price_or_discount() self.validate_max_discount() if self.price_or_discount != 'Price': self.currency = None if not self.margin_type: self.margin_rate_or_amount = 0.0 def validate_mandatory(self): for field in ["apply_on", "applicable_for"]: tocheck = frappe.scrub(self.get(field) or "") if tocheck and not self.get(tocheck): throw(_("{0} is required").format(self.meta.get_label(tocheck)), frappe.MandatoryError) def validate_applicable_for_selling_or_buying(self): if not self.selling and not self.buying: throw(_("Atleast one of the Selling or Buying must be selected")) if not self.selling and self.applicable_for in ["Customer", "Customer Group", "Territory", "Sales Partner", "Campaign"]: throw(_("Selling must be checked, if Applicable For is selected as {0}" .format(self.applicable_for))) if not self.buying and self.applicable_for in ["Supplier", "Supplier Type"]: throw(_("Buying must be checked, if Applicable For is selected as {0}" .format(self.applicable_for))) def validate_min_max_qty(self): if self.min_qty and self.max_qty and flt(self.min_qty) > flt(self.max_qty): throw(_("Min Qty can not be greater than Max Qty")) def cleanup_fields_value(self): for logic_field in ["apply_on", "applicable_for", "price_or_discount"]: fieldname = frappe.scrub(self.get(logic_field) or "") # reset all values except for the logic field options = (self.meta.get_options(logic_field) or "").split("\n") for f in options: if not f: continue f = frappe.scrub(f) if f!=fieldname: self.set(f, None) def validate_price_or_discount(self): for field in ["Price"]: if flt(self.get(frappe.scrub(field))) < 0: throw(_("{0} can not be negative").format(field)) def validate_max_discount(self): if self.price_or_discount == "Discount Percentage" and self.item_code: max_discount = frappe.db.get_value("Item", self.item_code, "max_discount") if max_discount and flt(self.discount_percentage) > flt(max_discount): throw(_("Max discount allowed for item: {0} is {1}%").format(self.item_code, max_discount)) #-------------------------------------------------------------------------------- @frappe.whitelist() def apply_pricing_rule(args): """ args = { "items": [{"doctype": "", "name": "", "item_code": "", "brand": "", "item_group": ""}, ...], "customer": "something", "customer_group": "something", "territory": "something", "supplier": "something", "supplier_type": "something", "currency": "something", "conversion_rate": "something", "price_list": "something", "plc_conversion_rate": "something", "company": "something", "transaction_date": "something", "campaign": "something", "sales_partner": "something", "ignore_pricing_rule": "something" } """ if isinstance(args, basestring): args = json.loads(args) args = frappe._dict(args) if not args.transaction_type: set_transaction_type(args) # list of dictionaries out = [] if args.get("doctype") == "Material Request": return out item_list = args.get("items") args.pop("items") set_serial_nos_based_on_fifo = 
frappe.db.get_single_value("Stock Settings", "automatically_set_serial_nos_based_on_fifo") for item in item_list: args_copy = copy.deepcopy(args) args_copy.update(item) out.append(get_pricing_rule_for_item(args_copy)) if set_serial_nos_based_on_fifo and not args.get('is_return'): out.append(get_serial_no_for_item(args_copy)) return out def get_serial_no_for_item(args): from erpnext.stock.get_item_details import get_serial_no item_details = frappe._dict({ "doctype": args.doctype, "name": args.name, "serial_no": args.serial_no }) if args.get("parenttype") in ("Sales Invoice", "Delivery Note") and args.stock_qty > 0: item_details.serial_no = get_serial_no(args) return item_details def get_pricing_rule_for_item(args): if args.get("parenttype") == "Material Request": return {} item_details = frappe._dict({ "doctype": args.doctype, "name": args.name, "pricing_rule": None }) if args.ignore_pricing_rule or not args.item_code: if frappe.db.exists(args.doctype, args.name) and args.get("pricing_rule"): item_details = remove_pricing_rule_for_item(args.get("pricing_rule"), item_details) return item_details if not (args.item_group and args.brand): try: args.item_group, args.brand = frappe.db.get_value("Item", args.item_code, ["item_group", "brand"]) except TypeError: # invalid item_code return item_details if not args.item_group: frappe.throw(_("Item Group not mentioned in item master for item {0}").format(args.item_code)) if args.transaction_type=="selling": if args.customer and not (args.customer_group and args.territory): customer = frappe.db.get_value("Customer", args.customer, ["customer_group", "territory"]) if customer: args.customer_group, args.territory = customer args.supplier = args.supplier_type = None elif args.supplier and not args.supplier_type: args.supplier_type = frappe.db.get_value("Supplier", args.supplier, "supplier_type") args.customer = args.customer_group = args.territory = None pricing_rules = get_pricing_rules(args) pricing_rule = filter_pricing_rules(args, pricing_rules) if pricing_rule: item_details.pricing_rule = pricing_rule.name item_details.pricing_rule_for = pricing_rule.price_or_discount item_details.margin_type = pricing_rule.margin_type item_details.margin_rate_or_amount = pricing_rule.margin_rate_or_amount if pricing_rule.price_or_discount == "Price": if pricing_rule.get('currency') and \ pricing_rule.currency == args.currency: price_list_rate = pricing_rule.price * (args.conversion_factor or 1.0) else: price_list_rate = (pricing_rule.price/flt(args.conversion_rate)) * args.conversion_factor or 1.0 \ if args.conversion_rate else 0.0 item_details.update({ "price_list_rate": price_list_rate, "discount_percentage": 0.0 }) else: item_details.discount_percentage = pricing_rule.discount_percentage elif args.get('pricing_rule'): item_details = remove_pricing_rule_for_item(args.get("pricing_rule"), item_details) return item_details def remove_pricing_rule_for_item(pricing_rule, item_details): pricing_rule = frappe.db.get_value('Pricing Rule', pricing_rule, ['price_or_discount', 'margin_type'], as_dict=1) if pricing_rule and pricing_rule.price_or_discount == 'Discount Percentage': item_details.discount_percentage = 0.0 if pricing_rule and pricing_rule.margin_type in ['Percentage', 'Amount']: item_details.margin_rate_or_amount = 0.0 item_details.margin_type = None if item_details.pricing_rule: item_details.pricing_rule = None return item_details @frappe.whitelist() def remove_pricing_rules(item_list): if isinstance(item_list, basestring): item_list = json.loads(item_list) out 
= [] for item in item_list: item = frappe._dict(item) out.append(remove_pricing_rule_for_item(item.get("pricing_rule"), item)) return out def get_pricing_rules(args): def _get_tree_conditions(parenttype, allow_blank=True): field = frappe.scrub(parenttype) condition = "" if args.get(field): try: lft, rgt = frappe.db.get_value(parenttype, args[field], ["lft", "rgt"]) except TypeError: frappe.throw(_("Invalid {0}").format(args[field])) parent_groups = frappe.db.sql_list("""select name from `tab%s` where lft<=%s and rgt>=%s""" % (parenttype, '%s', '%s'), (lft, rgt)) if parent_groups: if allow_blank: parent_groups.append('') condition = " ifnull("+field+", '') in ('" + \ "', '".join([frappe.db.escape(d) for d in parent_groups])+"')" return condition conditions = item_variant_condition = "" values = {"item_code": args.get("item_code"), "brand": args.get("brand")} for field in ["company", "customer", "supplier", "supplier_type", "campaign", "sales_partner", "currency"]: if args.get(field): conditions += " and ifnull("+field+", '') in (%("+field+")s, '')" values[field] = args.get(field) else: conditions += " and ifnull("+field+", '') = ''" for parenttype in ["Customer Group", "Territory"]: group_condition = _get_tree_conditions(parenttype) if group_condition: conditions += " and " + group_condition if not args.price_list: args.price_list = None conditions += " and ifnull(for_price_list, '') in (%(price_list)s, '')" values["price_list"] = args.get("price_list") if args.get("transaction_date"): conditions += """ and %(transaction_date)s between ifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31')""" values['transaction_date'] = args.get('transaction_date') item_group_condition = _get_tree_conditions("Item Group", False) if item_group_condition: item_group_condition = " or " + item_group_condition # load variant of if not defined if "variant_of" not in args: args.variant_of = frappe.db.get_value("Item", args.item_code, "variant_of") if args.variant_of: item_variant_condition = ' or item_code=%(variant_of)s ' values['variant_of'] = args.variant_of return frappe.db.sql("""select * from `tabPricing Rule` where (item_code=%(item_code)s {item_variant_condition} {item_group_condition} or brand=%(brand)s) and docstatus < 2 and disable = 0 and {transaction_type} = 1 {conditions} order by priority desc, name desc""".format( item_group_condition = item_group_condition, item_variant_condition = item_variant_condition, transaction_type = args.transaction_type, conditions = conditions), values, as_dict=1) def filter_pricing_rules(args, pricing_rules): # filter for qty stock_qty = args.get('qty') * args.get('conversion_factor', 1) if pricing_rules: pricing_rules = filter(lambda x: (flt(stock_qty)>=flt(x.min_qty) and (flt(stock_qty)<=x.max_qty if x.max_qty else True)), pricing_rules) # add variant_of property in pricing rule for p in pricing_rules: if p.item_code and args.variant_of: p.variant_of = args.variant_of else: p.variant_of = None # find pricing rule with highest priority if pricing_rules: max_priority = max([cint(p.priority) for p in pricing_rules]) if max_priority: pricing_rules = filter(lambda x: cint(x.priority)==max_priority, pricing_rules) # apply internal priority all_fields = ["item_code", "item_group", "brand", "customer", "customer_group", "territory", "supplier", "supplier_type", "campaign", "sales_partner", "variant_of"] if len(pricing_rules) > 1: for field_set in [["item_code", "variant_of", "item_group", "brand"], ["customer", "customer_group", "territory"], ["supplier", 
"supplier_type"]]: remaining_fields = list(set(all_fields) - set(field_set)) if if_all_rules_same(pricing_rules, remaining_fields): pricing_rules = apply_internal_priority(pricing_rules, field_set, args) break if len(pricing_rules) > 1: price_or_discount = list(set([d.price_or_discount for d in pricing_rules])) if len(price_or_discount) == 1 and price_or_discount[0] == "Discount Percentage": pricing_rules = filter(lambda x: x.for_price_list==args.price_list, pricing_rules) \ or pricing_rules if len(pricing_rules) > 1 and not args.for_shopping_cart: frappe.throw(_("Multiple Price Rules exists with same criteria, please resolve conflict by assigning priority. Price Rules: {0}") .format("\n".join([d.name for d in pricing_rules])), MultiplePricingRuleConflict) elif pricing_rules: return pricing_rules[0] def if_all_rules_same(pricing_rules, fields): all_rules_same = True val = [pricing_rules[0][k] for k in fields] for p in pricing_rules[1:]: if val != [p[k] for k in fields]: all_rules_same = False break return all_rules_same def apply_internal_priority(pricing_rules, field_set, args): filtered_rules = [] for field in field_set: if args.get(field): filtered_rules = filter(lambda x: x[field]==args[field], pricing_rules) if filtered_rules: break return filtered_rules or pricing_rules def set_transaction_type(args): if args.doctype in ("Opportunity", "Quotation", "Sales Order", "Delivery Note", "Sales Invoice"): args.transaction_type = "selling" elif args.doctype in ("Material Request", "Supplier Quotation", "Purchase Order", "Purchase Receipt", "Purchase Invoice"): args.transaction_type = "buying" elif args.customer: args.transaction_type = "selling" else: args.transaction_type = "buying"
gpl-3.0
7,289,619,787,295,705,000
34.693151
131
0.68834
false
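
filter_pricing_rules first applies the min/max qty window and then keeps only the rules at the highest priority before falling back to internal priority. The same selection can be sketched standalone with plain dicts; the field names follow the code above, the rule values are made up.

def pick_rules(rules, stock_qty):
    # keep rules whose qty window contains stock_qty (max_qty of 0/None means "no upper bound")
    rules = [r for r in rules
             if stock_qty >= (r.get('min_qty') or 0)
             and (not r.get('max_qty') or stock_qty <= r['max_qty'])]
    if not rules:
        return []
    # keep only the rules sharing the highest priority
    top = max(int(r.get('priority') or 0) for r in rules)
    return [r for r in rules if int(r.get('priority') or 0) == top]

rules = [
    {'name': 'PR-0001', 'min_qty': 1, 'max_qty': 10, 'priority': 1},
    {'name': 'PR-0002', 'min_qty': 5, 'max_qty': 0,  'priority': 3},
]
print(pick_rules(rules, stock_qty=7))   # -> only PR-0002 (higher priority)
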
openstack/designate
designate/tests/test_hookpoints.py
1
4338
# Copyright 2015 Rackspace Hosting. # # Author: Eric Larson <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest.mock import Mock from unittest.mock import patch from oslo_config import cfg from stevedore.hook import HookManager from stevedore.extension import Extension from designate.hookpoints import hook_point from designate.hookpoints import BaseHook from designate.tests import TestCase class AddHook(BaseHook): OPTS = [ cfg.Opt('bar'), ] @property def bar(self): return cfg.CONF[self.group].bar def hook(self, *args, **kw): return self.hook_target(*args, **kw) + 1 def get_hook_manager(*hooks): hooks = hooks or [AddHook] group = 'hook_point:foo' ext = [ Extension('designate_hook', 'foo', hook, hook(group)) for hook in hooks ] return HookManager.make_test_instance(ext, 'designate_hook') def inc(num): return num + 1 class TestHookpoints(TestCase): def setUp(self): TestCase.setUp(self) group = 'hook_point:foo' self.CONF.register_group(cfg.OptGroup(group)) self.CONF.register_opts(BaseHook.OPTS, group=group) def test_no_hookpoint_is_noop(self): def doit(self, name): return 'done: %s' % name self.assertEqual(doit, hook_point('foo')(doit)) def test_hook_is_decorator(self): hp = hook_point('foo') hp.hook_manager = Mock(return_value=get_hook_manager()) assert hp(inc)(1) == 3 def test_apply_N_hooks(self): hp = hook_point('foo') hp.hook_manager = Mock(return_value=get_hook_manager(AddHook, AddHook)) assert hp(inc)(1) == 4 def test_hook_init(self): hp = hook_point('foo') # Make sure we set up our object when the hook point is # applied to a function / method. hp.find_name = Mock(return_value='foo.bar.baz') hp.hook_manager = Mock(return_value=get_hook_manager()) hp.find_config = Mock(return_value={'enabled': True}) hp.update_config_opts = Mock() hp(inc) self.assertEqual(hp.name, 'foo.bar.baz') self.assertEqual(hp.group, 'hook_point:foo.bar.baz') hp.update_config_opts.assert_called_with(hp.group, hp.hooks) class TestHookpointsConfigOpts(TestCase): """Make sure hooks add the necessary config opts. 
""" def test_hook_adds_config_opts(self): hp = hook_point('foo') hp.hook_manager = Mock(return_value=get_hook_manager()) hp(inc) assert hp.group in self.CONF.keys() class TestHookpointsEnabling(TestCase): def setUp(self): TestCase.setUp(self) # NOTE: The options need to be added here via the test classes # CONF in order to fall through group = 'hook_point:foo' self.CONF.register_group(cfg.OptGroup(group)) self.CONF.register_opts(BaseHook.OPTS, group=group) @patch.object(hook_point, 'hook_manager', Mock(return_value=get_hook_manager())) def test_hook_disabled(self): hp = hook_point('foo') result_func = hp(inc) # We should now have a config option we can set to disabled self.config(disabled=True, group='hook_point:foo') # The result is 2 so no extra add hook was applied self.assertEqual(result_func(1), 2) @patch.object(hook_point, 'hook_manager', Mock(return_value=get_hook_manager())) def test_hook_enabled_when_config_key_exists(self): hp = hook_point('foo') hp(inc) # Add our config self.config(bar='from config', group='hook_point:foo') # reapply our hook result_func = hp(inc) # The result is 3 so the extra add hook was applied self.assertEqual(result_func(1), 3)
apache-2.0
806,856,279,180,164,100
28.917241
79
0.644076
false
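
What these tests rely on is simply that each registered hook wraps the decorated callable, so an AddHook adds 1 per application. A stripped-down sketch of that chaining without stevedore or oslo.config (class and function names here are illustrative):

class AddOneHook(object):
    """Wraps a callable and adds 1 to its result."""
    def __init__(self, target):
        self.target = target
    def __call__(self, *args, **kw):
        return self.target(*args, **kw) + 1

def hook_point(hooks):
    """Return a decorator that layers each hook around the target, last hook outermost."""
    def decorator(fn):
        for hook_cls in hooks:
            fn = hook_cls(fn)
        return fn
    return decorator

@hook_point([AddOneHook])
def inc(num):
    return num + 1

print(inc(1))   # 3, matching test_hook_is_decorator

@hook_point([AddOneHook, AddOneHook])
def inc2(num):
    return num + 1

print(inc2(1))  # 4, matching test_apply_N_hooks
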
saleemjaveds/https-github.com-openstack-nova
nova/virt/hyperv/driver.py
1
9980
# Copyright (c) 2010 Cloud.com, Inc # Copyright (c) 2012 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A Hyper-V Nova Compute driver. """ from nova.i18n import _ from nova.openstack.common import log as logging from nova.virt import driver from nova.virt.hyperv import hostops from nova.virt.hyperv import livemigrationops from nova.virt.hyperv import migrationops from nova.virt.hyperv import rdpconsoleops from nova.virt.hyperv import snapshotops from nova.virt.hyperv import vmops from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) class HyperVDriver(driver.ComputeDriver): def __init__(self, virtapi): super(HyperVDriver, self).__init__(virtapi) self._hostops = hostops.HostOps() self._volumeops = volumeops.VolumeOps() self._vmops = vmops.VMOps() self._snapshotops = snapshotops.SnapshotOps() self._livemigrationops = livemigrationops.LiveMigrationOps() self._migrationops = migrationops.MigrationOps() self._rdpconsoleops = rdpconsoleops.RDPConsoleOps() def init_host(self, host): self._vmops.restart_vm_log_writers() def list_instances(self): return self._vmops.list_instances() def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): self._vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): self._vmops.reboot(instance, network_info, reboot_type) def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None): self._vmops.destroy(instance, network_info, block_device_info, destroy_disks) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True, migrate_data=None, destroy_vifs=True): """Cleanup after instance being destroyed by Hypervisor.""" pass def get_info(self, instance): return self._vmops.get_info(instance) def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): return self._volumeops.attach_volume(connection_info, instance['name']) def detach_volume(self, connection_info, instance, mountpoint, encryption=None): return self._volumeops.detach_volume(connection_info, instance['name']) def get_volume_connector(self, instance): return self._volumeops.get_volume_connector(instance) def get_available_resource(self, nodename): return self._hostops.get_available_resource() def get_host_stats(self, refresh=False): return self._hostops.get_host_stats(refresh) def host_power_action(self, host, action): return self._hostops.host_power_action(host, action) def snapshot(self, context, instance, image_id, update_task_state): self._snapshotops.snapshot(context, instance, image_id, update_task_state) def pause(self, instance): self._vmops.pause(instance) def unpause(self, instance): self._vmops.unpause(instance) def suspend(self, instance): self._vmops.suspend(instance) def resume(self, 
context, instance, network_info, block_device_info=None): self._vmops.resume(instance) def power_off(self, instance, timeout=0, retry_interval=0): # TODO(PhilDay): Add support for timeout (clean shutdown) self._vmops.power_off(instance) def power_on(self, context, instance, network_info, block_device_info=None): self._vmops.power_on(instance) def live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None): self._livemigrationops.live_migration(context, instance, dest, post_method, recover_method, block_migration, migrate_data) def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info, destroy_disks=True, migrate_data=None): self.destroy(context, instance, network_info, block_device_info) def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data=None): self._livemigrationops.pre_live_migration(context, instance, block_device_info, network_info) def post_live_migration_at_destination(self, context, instance, network_info, block_migration=False, block_device_info=None): self._livemigrationops.post_live_migration_at_destination( context, instance, network_info, block_migration) def check_can_live_migrate_destination(self, context, instance, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): return self._livemigrationops.check_can_live_migrate_destination( context, instance, src_compute_info, dst_compute_info, block_migration, disk_over_commit) def check_can_live_migrate_destination_cleanup(self, context, dest_check_data): self._livemigrationops.check_can_live_migrate_destination_cleanup( context, dest_check_data) def check_can_live_migrate_source(self, context, instance, dest_check_data): return self._livemigrationops.check_can_live_migrate_source( context, instance, dest_check_data) def get_instance_disk_info(self, instance_name, block_device_info=None): pass def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" msg = _("VIF plugging is not supported by the Hyper-V driver.") raise NotImplementedError(msg) def unplug_vifs(self, instance, network_info): """Unplug VIFs from networks.""" msg = _("VIF unplugging is not supported by the Hyper-V driver.") raise NotImplementedError(msg) def ensure_filtering_rules_for_instance(self, instance, network_info): LOG.debug("ensure_filtering_rules_for_instance called", instance=instance) def unfilter_instance(self, instance, network_info): LOG.debug("unfilter_instance called", instance=instance) def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None, timeout=0, retry_interval=0): # TODO(PhilDay): Add support for timeout (clean shutdown) return self._migrationops.migrate_disk_and_power_off(context, instance, dest, flavor, network_info, block_device_info) def confirm_migration(self, migration, instance, network_info): self._migrationops.confirm_migration(migration, instance, network_info) def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): self._migrationops.finish_revert_migration(context, instance, network_info, block_device_info, power_on) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): self._migrationops.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info, power_on) def 
get_host_ip_addr(self): return self._hostops.get_host_ip_addr() def get_host_uptime(self, host): return self._hostops.get_host_uptime() def get_rdp_console(self, context, instance): return self._rdpconsoleops.get_rdp_console(instance) def get_console_output(self, context, instance): return self._vmops.get_console_output(instance)
apache-2.0
-6,918,559,753,360,435,000
43.159292
79
0.584469
false
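
HyperVDriver is essentially a facade: the ops helpers are composed once in __init__ and nearly every public method forwards to one of them. A schematic sketch of the same pattern outside Nova (the classes and calls below are illustrative, not the real nova or os-win APIs):

class VMOps(object):
    def pause(self, instance):
        print("pausing %s" % instance)

class VolumeOps(object):
    def attach_volume(self, connection_info, instance_name):
        print("attaching %s to %s" % (connection_info, instance_name))

class Driver(object):
    """Thin facade: compose ops helpers once, then forward each call."""
    def __init__(self):
        self._vmops = VMOps()
        self._volumeops = VolumeOps()

    def pause(self, instance):
        self._vmops.pause(instance)

    def attach_volume(self, connection_info, instance, mountpoint):
        return self._volumeops.attach_volume(connection_info, instance['name'])

drv = Driver()
drv.pause("instance-0001")
drv.attach_volume({"target": "iqn-example"}, {"name": "instance-0001"}, "/dev/sdb")
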
CARPEM/GalaxyDocker
config/configShinyApp/checkContainerTime.py
1
1124
import sys import datetime from subprocess import Popen, PIPE import json #container ID extraction container_id = sys.argv[1] print("Check :"+container_id) cmd=['docker', 'inspect', "--format='{{json .State}}'", container_id] print(cmd) process = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() status=json.loads(stdout) print(status['StartedAt']) print(stdout) shinyAppStartDate=datetime.datetime.strptime(status['StartedAt'].split('.')[0], "%Y-%m-%dT%H:%M:%S") today=datetime.datetime.now() print(shinyAppStartDate) deltaTime = (today-shinyAppStartDate).total_seconds() print(deltaTime) cmdstop=['docker', 'stop', container_id] cmdrm=['docker', 'rm', container_id] #everything is compared in seconds if(deltaTime>300): print("delete containers") print("docker stop "+container_id) process = Popen(cmdstop, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() print("docker rm "+container_id) process = Popen(cmdrm, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate()
mit
8,542,311,203,721,124,000
25.761905
100
0.736655
false
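
The core of checkContainerTime.py is "parse StartedAt, subtract from now, act if older than 300 seconds", which can be exercised without Docker. A small sketch follows; the timestamp is made up, and utcnow() is assumed here even though the script above compares against local time.

import datetime

def uptime_seconds(started_at, now=None):
    """started_at: docker-style ISO timestamp, e.g. '2017-03-01T12:00:00.123456789Z'."""
    started = datetime.datetime.strptime(started_at.split('.')[0], "%Y-%m-%dT%H:%M:%S")
    now = now or datetime.datetime.utcnow()
    return (now - started).total_seconds()

now = datetime.datetime(2017, 3, 1, 12, 6, 0)
delta = uptime_seconds("2017-03-01T12:00:00.123456789Z", now=now)
print(delta)        # 360.0
print(delta > 300)  # True -> the script would stop and remove the container
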
mnick/qutebrowser
qutebrowser/browser/commands.py
1
39927
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Command dispatcher for TabbedBrowser.""" import re import os import subprocess import posixpath import functools from PyQt5.QtWidgets import QApplication, QTabBar from PyQt5.QtCore import Qt, QUrl from PyQt5.QtGui import QClipboard from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog from PyQt5.QtWebKitWidgets import QWebPage, QWebInspector import pygments import pygments.lexers import pygments.formatters from qutebrowser.commands import userscripts, cmdexc, cmdutils from qutebrowser.config import config, configexc from qutebrowser.browser import webelem from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils, objreg, utils) from qutebrowser.misc import editor class CommandDispatcher: """Command dispatcher for TabbedBrowser. Contains all commands which are related to the current tab. We can't simply add these commands to BrowserTab directly and use currentWidget() for TabbedBrowser.cmd because at the time cmdutils.register() decorators are run, currentWidget() will return None. Attributes: _editor: The ExternalEditor object. _win_id: The window ID the CommandDispatcher is associated with. """ def __init__(self, win_id): self._editor = None self._win_id = win_id def __repr__(self): return utils.get_repr(self) def _tabbed_browser(self, window=False): """Convienence method to get the right tabbed-browser. Args: window: If True, open a new window. """ if window: main_window = objreg.get('main-window', scope='window', window=self._win_id) win_id = main_window.spawn() else: win_id = self._win_id return objreg.get('tabbed-browser', scope='window', window=win_id) def _count(self): """Convenience method to get the widget count.""" return self._tabbed_browser().count() def _set_current_index(self, idx): """Convenience method to set the current widget index.""" return self._tabbed_browser().setCurrentIndex(idx) def _current_index(self): """Convenience method to get the current widget index.""" return self._tabbed_browser().currentIndex() def _current_url(self): """Convenience method to get the current url.""" try: return self._tabbed_browser().current_url() except qtutils.QtValueError as e: msg = "Current URL is invalid" if e.reason: msg += " ({})".format(e.reason) msg += "!" raise cmdexc.CommandError(msg) def _current_widget(self): """Get the currently active widget from a command.""" widget = self._tabbed_browser().currentWidget() if widget is None: raise cmdexc.CommandError("No WebView available yet!") return widget def _open(self, url, tab, background, window): """Helper function to open a page. Args: url: The URL to open as QUrl. tab: Whether to open in a new tab. background: Whether to open in the background. 
window: Whether to open in a new window """ urlutils.raise_cmdexc_if_invalid(url) tabbed_browser = self._tabbed_browser() cmdutils.check_exclusive((tab, background, window), 'tbw') if window: tabbed_browser = self._tabbed_browser(window=True) tabbed_browser.tabopen(url) elif tab: tabbed_browser.tabopen(url, background=False, explicit=True) elif background: tabbed_browser.tabopen(url, background=True, explicit=True) else: widget = self._current_widget() widget.openurl(url) def _cntwidget(self, count=None): """Return a widget based on a count/idx. Args: count: The tab index, or None. Return: The current widget if count is None. The widget with the given tab ID if count is given. None if no widget was found. """ tabbed_browser = self._tabbed_browser() if count is None: return tabbed_browser.currentWidget() elif 1 <= count <= self._count(): cmdutils.check_overflow(count + 1, 'int') return tabbed_browser.widget(count - 1) else: return None def _scroll_percent(self, perc=None, count: {'special': 'count'}=None, orientation=None): """Inner logic for scroll_percent_(x|y). Args: perc: How many percent to scroll, or None count: How many percent to scroll, or None orientation: Qt.Horizontal or Qt.Vertical """ if perc is None and count is None: perc = 100 elif perc is None: perc = count perc = qtutils.check_overflow(perc, 'int', fatal=False) frame = self._current_widget().page().currentFrame() m = frame.scrollBarMaximum(orientation) if m == 0: return frame.setScrollBarValue(orientation, int(m * perc / 100)) def _tab_move_absolute(self, idx): """Get an index for moving a tab absolutely. Args: idx: The index to get, as passed as count. """ if idx is None: return 0 elif idx == 0: return self._count() - 1 else: return idx - 1 def _tab_move_relative(self, direction, delta): """Get an index for moving a tab relatively. Args: direction: + or - for relative moving, None for absolute. delta: Delta to the current tab. """ if delta is None: # We don't set delta to 1 in the function arguments because this # gets called from tab_move which has delta set to None by default. delta = 1 if direction == '-': return self._current_index() - delta elif direction == '+': return self._current_index() + delta def _tab_focus_last(self): """Select the tab which was last focused.""" try: tab = objreg.get('last-focused-tab', scope='window', window=self._win_id) except KeyError: raise cmdexc.CommandError("No last focused tab!") idx = self._tabbed_browser().indexOf(tab) if idx == -1: raise cmdexc.CommandError("Last focused tab vanished!") self._set_current_index(idx) def _editor_cleanup(self, oshandle, filename): """Clean up temporary file when the editor was closed.""" try: os.close(oshandle) os.remove(filename) except OSError: raise cmdexc.CommandError("Failed to delete tempfile...") def _get_selection_override(self, left, right, opposite): """Helper function for tab_close to get the tab to select. Args: left: Force selecting the tab to the left of the current tab. right: Force selecting the tab to the right of the current tab. opposite: Force selecting the tab in the oppsite direction of what's configured in 'tabs->select-on-remove'. Return: QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change should be made. 
""" cmdutils.check_exclusive((left, right, opposite), 'lro') if left: return QTabBar.SelectLeftTab elif right: return QTabBar.SelectRightTab elif opposite: conf_selection = config.get('tabs', 'select-on-remove') if conf_selection == QTabBar.SelectLeftTab: return QTabBar.SelectRightTab elif conf_selection == QTabBar.SelectRightTab: return QTabBar.SelectLeftTab elif conf_selection == QTabBar.SelectPreviousTab: raise cmdexc.CommandError( "-o is not supported with 'tabs->select-on-remove' set to " "'previous'!") return None @cmdutils.register(instance='command-dispatcher', scope='window') def tab_close(self, left=False, right=False, opposite=False, count: {'special': 'count'}=None): """Close the current/[count]th tab. Args: left: Force selecting the tab to the left of the current tab. right: Force selecting the tab to the right of the current tab. opposite: Force selecting the tab in the oppsite direction of what's configured in 'tabs->select-on-remove'. count: The tab index to close, or None """ tab = self._cntwidget(count) if tab is None: return tabbed_browser = self._tabbed_browser() tabbar = tabbed_browser.tabBar() selection_override = self._get_selection_override(left, right, opposite) if selection_override is None: tabbed_browser.close_tab(tab) else: old_selection_behavior = tabbar.selectionBehaviorOnRemove() tabbar.setSelectionBehaviorOnRemove(selection_override) tabbed_browser.close_tab(tab) tabbar.setSelectionBehaviorOnRemove(old_selection_behavior) @cmdutils.register(instance='command-dispatcher', name='open', maxsplit=0, scope='window', completion=[usertypes.Completion.quickmark_by_url]) def openurl(self, url, bg=False, tab=False, window=False, count: {'special': 'count'}=None): """Open a URL in the current/[count]th tab. Args: url: The URL to open. bg: Open in a new background tab. tab: Open in a new tab. window: Open in a new window. count: The tab index to open the URL in, or None. """ try: url = urlutils.fuzzy_url(url) except urlutils.FuzzyUrlError as e: raise cmdexc.CommandError(e) if tab or bg or window: self._open(url, tab, bg, window) else: curtab = self._cntwidget(count) if curtab is None: if count is None: # We want to open a URL in the current tab, but none exists # yet. self._tabbed_browser().tabopen(url) else: # Explicit count with a tab that doesn't exist. return else: curtab.openurl(url) @cmdutils.register(instance='command-dispatcher', name='reload', scope='window') def reloadpage(self, force=False, count: {'special': 'count'}=None): """Reload the current/[count]th tab. Args: count: The tab index to reload, or None. force: Bypass the page cache. """ tab = self._cntwidget(count) if tab is not None: if force: tab.page().triggerAction(QWebPage.ReloadAndBypassCache) else: tab.reload() @cmdutils.register(instance='command-dispatcher', scope='window') def stop(self, count: {'special': 'count'}=None): """Stop loading in the current/[count]th tab. Args: count: The tab index to stop, or None. """ tab = self._cntwidget(count) if tab is not None: tab.stop() @cmdutils.register(instance='command-dispatcher', name='print', scope='window') def printpage(self, preview=False, count: {'special': 'count'}=None): """Print the current/[count]th tab. Args: preview: Show preview instead of printing. count: The tab index to print, or None. 
""" if not qtutils.check_print_compat(): # WORKAROUND (remove this when we bump the requirements to 5.3.0) raise cmdexc.CommandError( "Printing on Qt < 5.3.0 on Windows is broken, please upgrade!") tab = self._cntwidget(count) if tab is not None: if preview: diag = QPrintPreviewDialog() diag.setAttribute(Qt.WA_DeleteOnClose) diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint | Qt.WindowMinimizeButtonHint) diag.paintRequested.connect(tab.print) diag.exec_() else: diag = QPrintDialog() diag.setAttribute(Qt.WA_DeleteOnClose) diag.open(lambda: tab.print(diag.printer())) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_clone(self, bg=False, window=False): """Duplicate the current tab. Args: bg: Open in a background tab. window: Open in a new window. Return: The new QWebView. """ if bg and window: raise cmdexc.CommandError("Only one of -b/-w can be given!") curtab = self._current_widget() tabbed_browser = self._tabbed_browser(window) newtab = tabbed_browser.tabopen(background=bg, explicit=True) history = qtutils.serialize(curtab.history()) qtutils.deserialize(history, newtab.history()) return newtab def _back_forward(self, tab, bg, window, count, forward): """Helper function for :back/:forward.""" if (not forward and not self._current_widget().page().history().canGoBack()): raise cmdexc.CommandError("At beginning of history.") if (forward and not self._current_widget().page().history().canGoForward()): raise cmdexc.CommandError("At end of history.") if tab or bg or window: widget = self.tab_clone(bg, window) else: widget = self._current_widget() for _ in range(count): if forward: widget.forward() else: widget.back() @cmdutils.register(instance='command-dispatcher', scope='window') def back(self, tab=False, bg=False, window=False, count: {'special': 'count'}=1): """Go back in the history of the current tab. Args: tab: Go back in a new tab. bg: Go back in a background tab. window: Go back in a new window. count: How many pages to go back. """ self._back_forward(tab, bg, window, count, forward=False) @cmdutils.register(instance='command-dispatcher', scope='window') def forward(self, tab=False, bg=False, window=False, count: {'special': 'count'}=1): """Go forward in the history of the current tab. Args: tab: Go forward in a new tab. bg: Go forward in a background tab. window: Go forward in a new window. count: How many pages to go forward. """ self._back_forward(tab, bg, window, count, forward=True) def _navigate_incdec(self, url, incdec, tab, background, window): """Helper method for :navigate when `where' is increment/decrement. Args: url: The current url. incdec: Either 'increment' or 'decrement'. tab: Whether to open the link in a new tab. background: Open the link in a new background tab. window: Open the link in a new window. 
""" encoded = bytes(url.toEncoded()).decode('ascii') # Get the last number in a string match = re.match(r'(.*\D|^)(\d+)(.*)', encoded) if not match: raise cmdexc.CommandError("No number found in URL!") pre, number, post = match.groups() if not number: raise cmdexc.CommandError("No number found in URL!") try: val = int(number) except ValueError: raise cmdexc.CommandError("Could not parse number '{}'.".format( number)) if incdec == 'decrement': if val <= 0: raise cmdexc.CommandError("Can't decrement {}!".format(val)) val -= 1 elif incdec == 'increment': val += 1 else: raise ValueError("Invalid value {} for indec!".format(incdec)) urlstr = ''.join([pre, str(val), post]).encode('ascii') new_url = QUrl.fromEncoded(urlstr) self._open(new_url, tab, background, window) def _navigate_up(self, url, tab, background, window): """Helper method for :navigate when `where' is up. Args: url: The current url. tab: Whether to open the link in a new tab. background: Open the link in a new background tab. window: Open the link in a new window. """ path = url.path() if not path or path == '/': raise cmdexc.CommandError("Can't go up!") new_path = posixpath.join(path, posixpath.pardir) url.setPath(new_path) self._open(url, tab, background, window) @cmdutils.register(instance='command-dispatcher', scope='window') def navigate(self, where: {'type': ('prev', 'next', 'up', 'increment', 'decrement')}, tab=False, bg=False, window=False): """Open typical prev/next links or navigate using the URL path. This tries to automatically click on typical _Previous Page_ or _Next Page_ links using some heuristics. Alternatively it can navigate by changing the current URL. Args: where: What to open. - `prev`: Open a _previous_ link. - `next`: Open a _next_ link. - `up`: Go up a level in the current URL. - `increment`: Increment the last number in the URL. - `decrement`: Decrement the last number in the URL. tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. """ cmdutils.check_exclusive((tab, bg, window), 'tbw') widget = self._current_widget() frame = widget.page().currentFrame() url = self._current_url() if frame is None: raise cmdexc.CommandError("No frame focused!") hintmanager = objreg.get('hintmanager', scope='tab') if where == 'prev': hintmanager.follow_prevnext(frame, url, prev=True, tab=tab, background=bg, window=window) elif where == 'next': hintmanager.follow_prevnext(frame, url, prev=False, tab=tab, background=bg, window=window) elif where == 'up': self._navigate_up(url, tab, bg, window) elif where in ('decrement', 'increment'): self._navigate_incdec(url, where, tab, bg, window) else: raise ValueError("Got called with invalid value {} for " "`where'.".format(where)) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') def scroll(self, dx: {'type': float}, dy: {'type': float}, count: {'special': 'count'}=1): """Scroll the current tab by 'count * dx/dy'. Args: dx: How much to scroll in x-direction. dy: How much to scroll in x-direction. count: multiplier """ dx *= count dy *= count cmdutils.check_overflow(dx, 'int') cmdutils.check_overflow(dy, 'int') self._current_widget().page().currentFrame().scroll(dx, dy) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') def scroll_perc(self, perc: {'type': float}=None, horizontal: {'flag': 'x'}=False, count: {'special': 'count'}=None): """Scroll to a specific percentage of the page. The percentage can be given either as argument or as count. If no percentage is given, the page is scrolled to the end. 
Args: perc: Percentage to scroll. horizontal: Scroll horizontally instead of vertically. count: Percentage to scroll. """ self._scroll_percent(perc, count, Qt.Horizontal if horizontal else Qt.Vertical) @cmdutils.register(instance='command-dispatcher', hide=True, scope='window') def scroll_page(self, x: {'type': float}, y: {'type': float}, count: {'special': 'count'}=1): """Scroll the frame page-wise. Args: x: How many pages to scroll to the right. y: How many pages to scroll down. count: multiplier """ frame = self._current_widget().page().currentFrame() size = frame.geometry() dx = count * x * size.width() dy = count * y * size.height() cmdutils.check_overflow(dx, 'int') cmdutils.check_overflow(dy, 'int') frame.scroll(dx, dy) @cmdutils.register(instance='command-dispatcher', scope='window') def yank(self, title=False, sel=False): """Yank the current URL/title to the clipboard or primary selection. Args: sel: Use the primary selection instead of the clipboard. title: Yank the title instead of the URL. """ clipboard = QApplication.clipboard() if title: s = self._tabbed_browser().tabText(self._current_index()) else: s = self._current_url().toString( QUrl.FullyEncoded | QUrl.RemovePassword) if sel and clipboard.supportsSelection(): mode = QClipboard.Selection target = "primary selection" else: mode = QClipboard.Clipboard target = "clipboard" log.misc.debug("Yanking to {}: '{}'".format(target, s)) clipboard.setText(s, mode) what = 'Title' if title else 'URL' message.info(self._win_id, "{} yanked to {}".format(what, target)) @cmdutils.register(instance='command-dispatcher', scope='window') def zoom_in(self, count: {'special': 'count'}=1): """Increase the zoom level for the current tab. Args: count: How many steps to zoom in. """ tab = self._current_widget() tab.zoom(count) @cmdutils.register(instance='command-dispatcher', scope='window') def zoom_out(self, count: {'special': 'count'}=1): """Decrease the zoom level for the current tab. Args: count: How many steps to zoom out. """ tab = self._current_widget() tab.zoom(-count) @cmdutils.register(instance='command-dispatcher', scope='window') def zoom(self, zoom: {'type': int}=None, count: {'special': 'count'}=None): """Set the zoom level for the current tab. The zoom can be given as argument or as [count]. If neither of both is given, the zoom is set to the default zoom. Args: zoom: The zoom percentage to set. count: The zoom percentage to set. """ try: default = config.get('ui', 'default-zoom') level = cmdutils.arg_or_count(zoom, count, default=default) except ValueError as e: raise cmdexc.CommandError(e) tab = self._current_widget() tab.zoom_perc(level) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_only(self, left=False, right=False): """Close all tabs except for the current one. Args: left: Keep tabs to the left of the current. right: Keep tabs to the right of the current. 
""" cmdutils.check_exclusive((left, right), 'lr') tabbed_browser = self._tabbed_browser() cur_idx = tabbed_browser.currentIndex() assert cur_idx != -1 for i, tab in enumerate(tabbed_browser.widgets()): if (i == cur_idx or (left and i < cur_idx) or (right and i > cur_idx)): continue else: tabbed_browser.close_tab(tab) @cmdutils.register(instance='command-dispatcher', scope='window') def undo(self): """Re-open a closed tab (optionally skipping [count] closed tabs).""" try: self._tabbed_browser().undo() except IndexError: raise cmdexc.CommandError("Nothing to undo!") @cmdutils.register(instance='command-dispatcher', scope='window') def tab_prev(self, count: {'special': 'count'}=1): """Switch to the previous tab, or switch [count] tabs back. Args: count: How many tabs to switch back. """ newidx = self._current_index() - count if newidx >= 0: self._set_current_index(newidx) elif config.get('tabs', 'wrap'): self._set_current_index(newidx % self._count()) else: raise cmdexc.CommandError("First tab") @cmdutils.register(instance='command-dispatcher', scope='window') def tab_next(self, count: {'special': 'count'}=1): """Switch to the next tab, or switch [count] tabs forward. Args: count: How many tabs to switch forward. """ newidx = self._current_index() + count if newidx < self._count(): self._set_current_index(newidx) elif config.get('tabs', 'wrap'): self._set_current_index(newidx % self._count()) else: raise cmdexc.CommandError("Last tab") @cmdutils.register(instance='command-dispatcher', scope='window') def paste(self, sel=False, tab=False, bg=False, window=False): """Open a page from the clipboard. Args: sel: Use the primary selection instead of the clipboard. tab: Open in a new tab. bg: Open in a background tab. window: Open in new window. """ clipboard = QApplication.clipboard() if sel and clipboard.supportsSelection(): mode = QClipboard.Selection target = "Primary selection" else: mode = QClipboard.Clipboard target = "Clipboard" text = clipboard.text(mode) if not text: raise cmdexc.CommandError("{} is empty.".format(target)) log.misc.debug("{} contained: '{}'".format(target, text)) try: url = urlutils.fuzzy_url(text) except urlutils.FuzzyUrlError as e: raise cmdexc.CommandError(e) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_focus(self, index: {'type': (int, 'last')}=None, count: {'special': 'count'}=None): """Select the tab given as argument/[count]. Args: index: The tab index to focus, starting with 1. The special value `last` focuses the last focused tab. count: The tab index to focus, starting with 1. """ if index == 'last': self._tab_focus_last() return try: idx = cmdutils.arg_or_count(index, count, default=1, countzero=self._count()) except ValueError as e: raise cmdexc.CommandError(e) cmdutils.check_overflow(idx + 1, 'int') if 1 <= idx <= self._count(): self._set_current_index(idx - 1) else: raise cmdexc.CommandError("There's no tab with index {}!".format( idx)) @cmdutils.register(instance='command-dispatcher', scope='window') def tab_move(self, direction: {'type': ('+', '-')}=None, count: {'special': 'count'}=None): """Move the current tab. Args: direction: `+` or `-` for relative moving, not given for absolute moving. count: If moving absolutely: New position (default: 0) If moving relatively: Offset. 
""" if direction is None: new_idx = self._tab_move_absolute(count) elif direction in '+-': try: new_idx = self._tab_move_relative(direction, count) except ValueError: raise cmdexc.CommandError("Count must be given for relative " "moving!") else: raise cmdexc.CommandError("Invalid direction '{}'!".format( direction)) if not 0 <= new_idx < self._count(): raise cmdexc.CommandError("Can't move tab to position {}!".format( new_idx)) tabbed_browser = self._tabbed_browser() tab = self._current_widget() cur_idx = self._current_index() icon = tabbed_browser.tabIcon(cur_idx) label = tabbed_browser.tabText(cur_idx) cmdutils.check_overflow(cur_idx, 'int') cmdutils.check_overflow(new_idx, 'int') tabbed_browser.setUpdatesEnabled(False) try: tabbed_browser.removeTab(cur_idx) tabbed_browser.insertTab(new_idx, tab, icon, label) self._set_current_index(new_idx) finally: tabbed_browser.setUpdatesEnabled(True) @cmdutils.register(instance='command-dispatcher', scope='window') def spawn(self, userscript=False, *args): """Spawn a command in a shell. Note the {url} variable which gets replaced by the current URL might be useful here. // We use subprocess rather than Qt's QProcess here because we really don't care about the process anymore as soon as it's spawned. Args: userscript: Run the command as an userscript. *args: The commandline to execute. """ log.procs.debug("Executing: {}, userscript={}".format( args, userscript)) if userscript: if len(args) > 1: self.run_userscript(args[0], args[1:]) else: self.run_userscript(args[0]) else: try: subprocess.Popen(args) except OSError as e: raise cmdexc.CommandError("Error while spawning command: " "{}".format(e)) @cmdutils.register(instance='command-dispatcher', scope='window') def home(self): """Open main startpage in current tab.""" self.openurl(config.get('general', 'startpage')[0]) @cmdutils.register(instance='command-dispatcher', scope='window', deprecated='Use :spawn --userscript instead!') def run_userscript(self, cmd, *args: {'nargs': '*'}): """Run an userscript given as argument. Args: cmd: The userscript to run. args: Arguments to pass to the userscript. """ cmd = os.path.expanduser(cmd) env = { 'QUTE_MODE': 'command', } idx = self._current_index() tabbed_browser = self._tabbed_browser() if idx != -1: env['QUTE_TITLE'] = tabbed_browser.tabText(idx) webview = tabbed_browser.currentWidget() if webview is not None and webview.hasSelection(): env['QUTE_SELECTED_TEXT'] = webview.selectedText() env['QUTE_SELECTED_HTML'] = webview.selectedHtml() try: url = tabbed_browser.current_url() except qtutils.QtValueError: pass else: env['QUTE_URL'] = url.toString(QUrl.FullyEncoded) userscripts.run(cmd, *args, win_id=self._win_id, env=env) @cmdutils.register(instance='command-dispatcher', scope='window') def quickmark_save(self): """Save the current page as a quickmark.""" quickmark_manager = objreg.get('quickmark-manager') quickmark_manager.prompt_save(self._win_id, self._current_url()) @cmdutils.register(instance='command-dispatcher', scope='window', maxsplit=0, completion=[usertypes.Completion.quickmark_by_name]) def quickmark_load(self, name, tab=False, bg=False, window=False): """Load a quickmark. Args: name: The name of the quickmark to load. tab: Load the quickmark in a new tab. bg: Load the quickmark in a new background tab. window: Load the quickmark in a new window. 
""" url = objreg.get('quickmark-manager').get(name) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', name='inspector', scope='window') def toggle_inspector(self): """Toggle the web inspector.""" cur = self._current_widget() if cur.inspector is None: if not config.get('general', 'developer-extras'): raise cmdexc.CommandError( "Please enable developer-extras before using the " "webinspector!") cur.inspector = QWebInspector() cur.inspector.setPage(cur.page()) cur.inspector.show() elif cur.inspector.isVisible(): cur.inspector.hide() else: if not config.get('general', 'developer-extras'): raise cmdexc.CommandError( "Please enable developer-extras before using the " "webinspector!") else: cur.inspector.show() @cmdutils.register(instance='command-dispatcher', scope='window') def download_page(self): """Download the current page.""" page = self._current_widget().page() download_manager = objreg.get('download-manager', scope='window', window=self._win_id) download_manager.get(self._current_url(), page) @cmdutils.register(instance='command-dispatcher', scope='window') def view_source(self): """Show the source of the current page.""" # pylint doesn't seem to like pygments... # pylint: disable=no-member widget = self._current_widget() if widget.viewing_source: raise cmdexc.CommandError("Already viewing source!") frame = widget.page().currentFrame() html = frame.toHtml() lexer = pygments.lexers.HtmlLexer() formatter = pygments.formatters.HtmlFormatter( full=True, linenos='table') highlighted = pygments.highlight(html, lexer, formatter) current_url = self._current_url() tab = self._tabbed_browser().tabopen(explicit=True) tab.setHtml(highlighted, current_url) tab.viewing_source = True @cmdutils.register(instance='command-dispatcher', name='help', completion=[usertypes.Completion.helptopic], scope='window') def show_help(self, tab=False, bg=False, window=False, topic=None): r"""Show help about a command or setting. Args: tab: Open in a new tab. bg: Open in a background tab. window: Open in a new window. topic: The topic to show help for. - :__command__ for commands. - __section__\->__option__ for settings. """ if topic is None: path = 'index.html' elif topic.startswith(':'): command = topic[1:] if command not in cmdutils.cmd_dict: raise cmdexc.CommandError("Invalid command {}!".format( command)) path = 'commands.html#{}'.format(command) elif '->' in topic: parts = topic.split('->') if len(parts) != 2: raise cmdexc.CommandError("Invalid help topic {}!".format( topic)) try: config.get(*parts) except configexc.NoSectionError: raise cmdexc.CommandError("Invalid section {}!".format( parts[0])) except configexc.NoOptionError: raise cmdexc.CommandError("Invalid option {}!".format( parts[1])) path = 'settings.html#{}'.format(topic.replace('->', '-')) else: raise cmdexc.CommandError("Invalid help topic {}!".format(topic)) url = QUrl('qute://help/{}'.format(path)) self._open(url, tab, bg, window) @cmdutils.register(instance='command-dispatcher', modes=[usertypes.KeyMode.insert], hide=True, scope='window') def open_editor(self): """Open an external editor with the currently selected form field. The editor which should be launched can be configured via the `general -> editor` config option. // We use QProcess rather than subprocess here because it makes it a lot easier to execute some code as soon as the process has been finished and do everything async. 
""" frame = self._current_widget().page().currentFrame() try: elem = webelem.focus_elem(frame) except webelem.IsNullError: raise cmdexc.CommandError("No element focused!") if not elem.is_editable(strict=True): raise cmdexc.CommandError("Focused element is not editable!") if elem.is_content_editable(): text = str(elem) else: text = elem.evaluateJavaScript('this.value') self._editor = editor.ExternalEditor( self._win_id, self._tabbed_browser()) self._editor.editing_finished.connect( functools.partial(self.on_editing_finished, elem)) self._editor.edit(text) def on_editing_finished(self, elem, text): """Write the editor text into the form field and clean up tempfile. Callback for QProcess when the editor was closed. Args: elem: The WebElementWrapper which was modified. text: The new text to insert. """ try: if elem.is_content_editable(): log.misc.debug("Filling element {} via setPlainText.".format( elem.debug_text())) elem.setPlainText(text) else: log.misc.debug("Filling element {} via javascript.".format( elem.debug_text())) text = webelem.javascript_escape(text) elem.evaluateJavaScript("this.value='{}'".format(text)) except webelem.IsNullError: raise cmdexc.CommandError("Element vanished while editing!")
gpl-3.0
7,033,561,843,097,685,000
37.764078
79
0.567586
false
prataprc/tayra
tayra/test/stdttl/ref/useinterface.ttl.py
1
2558
import imp from io import StringIO from pluggdapps.plugin import Plugin, implements from tayra import BaseTTLPlugin def __traceback_decorator__( frames ): from copy import deepcopy from os.path import basename def _map2ttl( frame ): filename = frame.filename lineno = frame.lineno lines = open(filename).readlines()[:lineno] lines.reverse() rc = {} for l in lines : if l.strip().startswith('# lineno') : _, ttl_lineno = l.split(':', 1) ttl_lineno = int( ttl_lineno ) ttl_text = open( _ttlfile ).readlines()[ ttl_lineno-1 ] return ttl_lineno, ttl_text return None, None newframes = [] for frame in frames : newframes.append( frame ) frameadded = getattr( frame, '_ttlframeadded', False ) basen = basename( frame.filename ) if basen.endswith( '.ttl.py' ) and basen == (basename( _ttlfile ) + '.py') and frameadded == False : newframe = deepcopy( frame ) frame._ttlframeadded = True try : newframe.lineno, newframe.linetext = _map2ttl( newframe ) if newframe.lineno : newframe.filename = _ttlfile newframes.append( newframe ) except : raise continue return newframes from tayra.interfaces import ITayraTestInterface def body( *args, **kwargs ) : _m.pushbuf() _m.extend( ['<!DOCTYPE html>\n'] ) # lineno:4 obj = _compiler.query_plugin( ITayraTestInterface, 'tayra.XYZTestInterface' ) # lineno:6 _m.pushbuf() _m.extend( ['<html>'] ) _m.pushbuf() _m.extend( ['\n '] ) # lineno:7 _m.pushbuf() _m.extend( ['<head>'] ) _m.pushbuf() _m.extend( ['\n '] ) _m.handletag( _m.popbuftext(), _m.popbuftext(), **{'nl': '', 'oprune': False, 'indent': False, 'iprune': False} ) # lineno:8 _m.pushbuf() _m.extend( ['<body>'] ) _m.pushbuf() _m.extend( ['\n '] ) # lineno:9 _m.extend( [''] ) _m.append(_m.evalexprs( '', 'obj.render()', '', globals(), locals()) ) _m.extend( ['\n'] ) _m.handletag( _m.popbuftext(), _m.popbuftext(), **{'nl': '', 'oprune': False, 'indent': False, 'iprune': False} ) _m.handletag( _m.popbuftext(), _m.popbuftext(), **{'nl': '', 'oprune': False, 'indent': False, 'iprune': False} ) return _m.popbuftext() # ---- Global Functions # ---- Interface functions # ---- Footer
gpl-3.0
4,534,265,095,234,785,000
31.794872
134
0.538702
false
asiersarasua/QGIS
python/plugins/db_manager/layer_preview.py
1
5023
# -*- coding: utf-8 -*- """ /*************************************************************************** Name : DB Manager Description : Database manager plugin for QGIS Date : May 23, 2011 copyright : (C) 2011 by Giuseppe Sucameli email : [email protected] ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from qgis.PyQt.QtCore import Qt, QTimer from qgis.PyQt.QtGui import QColor, QCursor from qgis.PyQt.QtWidgets import QApplication from qgis.gui import QgsMapCanvas, QgsMessageBar from qgis.core import Qgis, QgsVectorLayer, QgsProject, QgsSettings from qgis.utils import OverrideCursor from .db_plugins.plugin import Table class LayerPreview(QgsMapCanvas): def __init__(self, parent=None): super(LayerPreview, self).__init__(parent) self.parent = parent self.setCanvasColor(QColor(255, 255, 255)) self.item = None self.dirty = False self.currentLayerId = None # reuse settings from QGIS settings = QgsSettings() self.enableAntiAliasing(settings.value("/qgis/enable_anti_aliasing", False, type=bool)) zoomFactor = settings.value("/qgis/zoom_factor", 2, type=float) self.setWheelFactor(zoomFactor) def refresh(self): self.setDirty(True) self.loadPreview(self.item) def loadPreview(self, item): if item == self.item and not self.dirty: return if item is None: return self._clear() if isinstance(item, Table) and item.type in [Table.VectorType, Table.RasterType]: # update the preview, but first let the manager chance to show the canvas def runPrev(): return self._loadTablePreview(item) QTimer.singleShot(50, runPrev) else: return self.item = item self.item.aboutToChange.connect(self.setDirty) def setDirty(self, val=True): self.dirty = val def _clear(self): """ remove any layers from preview canvas """ if self.item is not None: # skip exception on RuntimeError fixes #6892 try: self.item.aboutToChange.disconnect(self.setDirty) except RuntimeError: pass self.item = None self.dirty = False self._loadTablePreview(None) def _loadTablePreview(self, table, limit=False): """ if has geometry column load to map canvas """ with OverrideCursor(Qt.WaitCursor): self.freeze() vl = None if table and table.geomType: # limit the query result if required if limit and table.rowCount > 1000: uniqueField = table.getValidQgisUniqueFields(True) if uniqueField is None: self.parent.tabs.setCurrentWidget(self.parent.info) self.parent.infoBar.pushMessage( QApplication.translate("DBManagerPlugin", "Unable to find a valid unique field"), Qgis.Warning, self.parent.iface.messageTimeout()) return uri = table.database().uri() uri.setDataSource("", u"(SELECT * FROM %s LIMIT 1000)" % table.quotedName(), table.geomColumn, "", uniqueField.name) provider = table.database().dbplugin().providerName() vl = QgsVectorLayer(uri.uri(False), table.name, provider) else: vl = table.toMapLayer() if vl and not vl.isValid(): vl.deleteLater() vl = None # remove old layer (if any) and set new if self.currentLayerId: if not QgsProject.instance().layerTreeRoot().findLayer(self.currentLayerId): QgsProject.instance().removeMapLayers([self.currentLayerId]) if vl and vl.isValid(): self.setLayers([vl]) 
                QgsProject.instance().addMapLayers([vl], False)
                self.zoomToFullExtent()
            else:
                self.setLayers([])

            # vl can be None (e.g. a table without geometry), so guard the
            # id() call instead of failing with an AttributeError
            self.currentLayerId = vl.id() if vl and vl.isValid() else None

            self.freeze(False)
            super().refresh()
gpl-2.0
151,934,034,634,910,370
36.207407
118
0.516624
false
swayf/pyLoad
module/plugins/accounts/FilesonicCom.py
1
2796
# -*- coding: utf-8 -*- """ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. @author: RaNaN """ from time import mktime, strptime from module.plugins.Account import Account from module.common.json_layer import json_loads class FilesonicCom(Account): __name__ = "FilesonicCom" __version__ = "0.31" __type__ = "account" __description__ = """filesonic.com account plugin""" __author_name__ = ("RaNaN", "Paul King") __author_mail__ = ("[email protected]", "") API_URL = "http://api.filesonic.com" def getDomain(self, req): xml = req.load(self.API_URL + "/utility?method=getFilesonicDomainForCurrentIp&format=json", decode=True) return json_loads(xml)["FSApi_Utility"]["getFilesonicDomainForCurrentIp"]["response"] def loadAccountInfo(self, req): xml = req.load(self.API_URL + "/user?method=getInfo&format=json", post={"u": self.loginname, "p": self.password}, decode=True) self.logDebug("account status retrieved from api %s" % xml) json = json_loads(xml) if json["FSApi_User"]["getInfo"]["status"] != "success": self.logError(_("Invalid login retrieving user details")) return {"validuntil": -1, "trafficleft": -1, "premium": False} premium = json["FSApi_User"]["getInfo"]["response"]["users"]["user"]["is_premium"] if premium: validuntil = json["FSApi_User"]["getInfo"]["response"]["users"]["user"]["premium_expiration"] validuntil = int(mktime(strptime(validuntil, "%Y-%m-%d %H:%M:%S"))) else: validuntil = -1 return {"validuntil": validuntil, "trafficleft": -1, "premium": premium} def login(self, req): domain = self.getDomain(req) post_vars = { "email": self.loginname, "password": self.password, "rememberMe": 1 } page = req.load("http://www%s/user/login" % domain, cookies=True, post=post_vars, decode=True) if "Provided password does not match." in page or "You must be logged in to view this page." in page: self.wrongPassword()
agpl-3.0
-295,136,709,200,270,100
38.380282
109
0.620529
false
Imperat/SSU-Courses
ssu-formal-languages/pda/pda.py
1
1105
import pda_exceptions as e


class PDA(object):
    """Simple automaton driven by a nested transition table.

    `rules` maps a state to a dict of {input symbol: next state}; once a
    transition is missing the machine enters a permanent "crashed" state
    and every further input raises.
    """

    def __init__(self, rules, input_alphabet, states,
                 initial_state, terminate_states):
        self.rules = rules
        self.input_alphabet = input_alphabet
        self.states = states
        self.state = initial_state
        self.terminate_states = terminate_states
        self.crash = False

    def _crash(self):
        self.crash = True

    def input(self, symbol):
        # Feed one symbol and follow rules[current_state][symbol].
        # print symbol + "- - " + self.state
        try:
            if self.crash:
                raise e.PDACrashException(
                    "Error by input. PDA is crashed!")
            self.state = self.rules[self.state][symbol]
        except KeyError:
            if symbol not in self.input_alphabet:
                self._crash()
                raise e.UnknownSymbolException(
                    "Symbol isn't in input alphabet")
            else:
                self._crash()
                raise e.PDACrashException(
                    "PDA is crashed")

    def in_terminate_state(self):
        # True when the current state is one of the accepting states.
        return self.state in self.terminate_states
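
# --- Illustrative usage sketch (editor addition, not part of the original
# module): shows the expected shape of the transition table,
# rules[current_state][input_symbol] -> next_state. The states and alphabet
# below are made-up examples.
if __name__ == '__main__':
    demo_rules = {
        'q0': {'a': 'q1'},
        'q1': {'b': 'q0'},
    }
    machine = PDA(rules=demo_rules,
                  input_alphabet={'a', 'b'},
                  states={'q0', 'q1'},
                  initial_state='q0',
                  terminate_states={'q0'})
    for symbol in 'abab':
        machine.input(symbol)
    print(machine.in_terminate_state())  # True: 'abab' ends back in q0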
apache-2.0
1,192,358,845,135,127,600
30.571429
55
0.540271
false
caedesvvv/pynoded
pynoded/graph.py
1
4291
""" Base graph objects """ from evh.base import EvHandler,EvStack from math import * class Drawable(object): """ Base class for drawable objects. """ def __init__(self,x,y,scale=1.0): self.x=x self.y=y self.scale=scale def Draw(self,ctx): ctx.save() ctx.scale(self.scale,self.scale) ctx.translate(self.x,self.y) self.Draw_(ctx) ctx.restore() def ToLocal(self,x,y): return ((x/self.scale)-self.x,(y/self.scale)-self.y) def FromLocal(self,x,y): return (self.x+x*self.scale,self.y+y*self.scale) def Draw_(self,ctx): """ Main method to do the cairo drawing of the object. This is the main function drawable objects have to override. @param ctx: cairo context """ pass class Collider(object): """ Base class for colliders. """ def Test(self,x,y): raise repr(self),"Not implemented!" class CircleCollider(Collider): """ A circle collider. """ def __init__(self,r): self.r=r def Test(self,x,y): return sqrt((x-self.x)**2+(y-self.y)**2)<=self.r class RectCollider(Collider): """ A rect collider. """ def __init__(self,w,h): self.w=w self.h=h def Test(self,x,y): return x>=self.x and x<=self.x+self.w and y>=self.y and y<=self.y+self.h class GraphObject(Drawable,Collider): """ Base class for graph objects. """ def __init__(self,parent,x,y,scale=1.0): if parent: self.parent=parent Drawable.__init__(self,x,y,scale) self.evstack=EvStack() def GetPointer(self): return self.ToLocal(*self.parent.GetPointer()) def Redraw(self): self.parent.Redraw() def ToParent(self,obj,x,y): if obj==self: return (x,y) else: return self.parent.ToParent(obj,*self.FromLocal(x,y)) def Root(self): return self.parent.Root() class Graph(GraphObject): """ A graph capable of containing connected objects. """ def __init__(self,parent,x,y,scale=1.0): GraphObject.__init__(self,parent,x,y,scale) self.evstack.append(PropagateEvH(self)) self.Clear() def Clear(self): self.objects=[[],[],[],[]] def Draw_(self,ctx): for prio in self.objects: for obj in prio: obj.Draw(ctx) def Propagate(self,x,y,event,*args): o=self.ObjectAt(x,y) return o and getattr(o.evstack,event,False) and getattr(o.evstack,event)(*args) def ObjectAt(self,x,y): for prio in reversed(self.objects): for o in reversed(prio): if o.Test(x,y): return o class MainGraph(Graph): """ Base class for main graphs. """ def __init__(self,*args): Graph.__init__(self,*args) self.objects[1]=[] def Clear(self): Graph.Clear(self) self.objects[1]=[] def Zoom(self,x,y,factor): pre_x,pre_y = self.ToLocal(x,y) self.scale *=factor post_x,post_y = self.ToLocal(x,y) self.x,self.y = (self.x+post_x-pre_x,self.y+post_y-pre_y) self.Redraw() def AddNode(self,obj): self.objects[0].append(obj) self.Redraw() def ToGlobal(self,x,y): return (x,y) def GetRawPointer(self): raise "Not implemented" def GetPointer(self): return self.ToLocal(*self.RawPointer) def Root(self): return self def Test(self,x,y): return True def CenteredBB(self,x,y,size): # not really needed, but useful in general.. obj_size = size bb = [x-(obj_size/2),y-(obj_size/2),obj_size,obj_size] return bb RawPointer = property(GetRawPointer) Pointer = property(GetPointer) class PropagateEvH(EvHandler): """ Event handler for propagating to children. """ def __init__(self,graph): """ PropagateEvH Constructor. @param graph: graph to which this event handler is attached. """ self.graph=graph def __getattr__(self,name): x,y=self.graph.GetPointer() return lambda *args: self.graph.Propagate(x,y,name,*args)
gpl-3.0
-4,142,430,700,984,500,700
23.66092
87
0.563505
false
wadobo/congressus
congressus/tickets/templatetags/tickets.py
1
1531
import re from django import template from django.utils.translation import ugettext as _ from django.utils.html import mark_safe register = template.Library() @register.simple_tag def ticket_seat_class(session, layout, seat, row, col): row = str(row) col = str(col) if seat == 'R': return 'seat-R' elif seat == '_': return 'seat-_' holded_type = session.is_seat_holded(layout, row, col) if holded_type: return 'seat-' + re.sub('[CP]', 'H', holded_type) if session.is_seat_available(layout, row, col): return 'seat-L' return 'seat-R' @register.simple_tag(takes_context=True) def scene_span(context, session, map): flag = 'scenedraw-%s' % session.id if flag in context: return '' context.dicts[0][flag] = True rows = (map.scene_bottom - map.scene_top) + 1 cols = (map.scene_right - map.scene_left) + 1 html = '<td class="scene" rowspan="%s" colspan="%s"> %s </td>' % (rows, cols, _('scene')) return mark_safe(html) @register.simple_tag def key(data, key, prefix="", default=''): k = key if prefix: k = prefix + str(key) return data.get(k, default) @register.simple_tag def get_value(dic, key): if not isinstance(dic, dict): return return dic.get(key) @register.simple_tag def get_free_seats(dic, session_id, layout): if not isinstance(dic, dict): return free = dic.get((session_id, layout.id)) if free is None: free = layout.free() return free
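
# --- Illustrative usage sketch (editor addition, not part of the original
# module): these tags are meant to be called from Django templates once the
# library is loaded under its module name. The context variables (session,
# layout, seat, row, col, map, free_map) are hypothetical examples.
#
#   {% load tickets %}
#
#   <td class="{% ticket_seat_class session layout seat row col %}"></td>
#   {% scene_span session map %}
#   {% get_free_seats free_map session.id layout %}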
agpl-3.0
2,939,241,391,327,671,000
21.514706
93
0.614631
false
michaelkirk/QGIS
python/plugins/processing/algs/lidar/lastools/lasnoise.py
1
3645
# -*- coding: utf-8 -*- """ *************************************************************************** lasnoise.py --------------------- Date : September 2013 Copyright : (C) 2013 by Martin Isenburg Email : martin near rapidlasso point com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Martin Isenburg' __date__ = 'September 2013' __copyright__ = '(C) 2013, Martin Isenburg' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from LAStoolsUtils import LAStoolsUtils from LAStoolsAlgorithm import LAStoolsAlgorithm from processing.core.parameters import ParameterNumber from processing.core.parameters import ParameterSelection class lasnoise(LAStoolsAlgorithm): ISOLATED = "ISOLATED" STEP_XY = "STEP_XY" STEP_Z = "STEP_Z" OPERATION = "OPERATION" OPERATIONS = ["classify", "remove"] CLASSIFY_AS = "CLASSIFY_AS" def defineCharacteristics(self): self.name, self.i18n_name = self.trAlgorithm('lasnoise') self.group, self.i18n_group = self.trAlgorithm('LAStools') self.addParametersVerboseGUI() self.addParametersPointInputGUI() self.addParameter(ParameterNumber(lasnoise.ISOLATED, self.tr("isolated if surrounding cells have only"), 0, None, 5)) self.addParameter(ParameterNumber(lasnoise.STEP_XY, self.tr("resolution of isolation grid in xy"), 0, None, 4.0)) self.addParameter(ParameterNumber(lasnoise.STEP_Z, self.tr("resolution of isolation grid in z"), 0, None, 4.0)) self.addParameter(ParameterSelection(lasnoise.OPERATION, self.tr("what to do with isolated points"), lasnoise.OPERATIONS, 0)) self.addParameter(ParameterNumber(lasnoise.CLASSIFY_AS, self.tr("classify as"), 0, None, 7)) self.addParametersPointOutputGUI() self.addParametersAdditionalGUI() def processAlgorithm(self, progress): commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lasnoise")] self.addParametersVerboseCommands(commands) self.addParametersPointInputCommands(commands) isolated = self.getParameterValue(lasnoise.ISOLATED) commands.append("-isolated") commands.append(str(isolated)) step_xy = self.getParameterValue(lasnoise.STEP_XY) commands.append("-step_xy") commands.append(str(step_xy)) step_z = self.getParameterValue(lasnoise.STEP_Z) commands.append("-step_z") commands.append(str(step_z)) operation = self.getParameterValue(lasnoise.OPERATION) if operation != 0: commands.append("-remove_noise") else: commands.append("-classify_as") classify_as = self.getParameterValue(lasnoise.CLASSIFY_AS) commands.append(str(classify_as)) self.addParametersPointOutputCommands(commands) self.addParametersAdditionalCommands(commands) LAStoolsUtils.runLAStools(commands, progress)
gpl-2.0
-8,790,487,615,896,949,000
42.915663
82
0.590672
false
pirate/bookmark-archiver
archivebox/extractors/readability.py
1
4294
__package__ = 'archivebox.extractors' from pathlib import Path from tempfile import NamedTemporaryFile from typing import Optional import json from ..index.schema import Link, ArchiveResult, ArchiveError from ..system import run, atomic_write from ..util import ( enforce_types, download_url, is_static_file, ) from ..config import ( TIMEOUT, CURL_BINARY, SAVE_READABILITY, DEPENDENCIES, READABILITY_VERSION, ) from ..logging_util import TimedProgress @enforce_types def get_html(link: Link, path: Path) -> str: """ Try to find wget, singlefile and then dom files. If none is found, download the url again. """ canonical = link.canonical_outputs() abs_path = path.absolute() sources = [canonical["singlefile_path"], canonical["wget_path"], canonical["dom_path"]] document = None for source in sources: try: with open(abs_path / source, "r", encoding="utf-8") as f: document = f.read() break except (FileNotFoundError, TypeError): continue if document is None: return download_url(link.url) else: return document @enforce_types def should_save_readability(link: Link, out_dir: Optional[str]=None, overwrite: Optional[bool]=False) -> bool: if is_static_file(link.url): return False out_dir = out_dir or Path(link.link_dir) if not overwrite and (out_dir / 'readability').exists(): return False return SAVE_READABILITY @enforce_types def save_readability(link: Link, out_dir: Optional[str]=None, timeout: int=TIMEOUT) -> ArchiveResult: """download reader friendly version using @mozilla/readability""" out_dir = Path(out_dir or link.link_dir) output_folder = out_dir.absolute() / "readability" output = "readability" # Readability Docs: https://github.com/mozilla/readability status = 'succeeded' # fake command to show the user so they have something to try debugging if get_html fails cmd = [ CURL_BINARY, link.url ] readability_content = None timer = TimedProgress(timeout, prefix=' ') try: document = get_html(link, out_dir) temp_doc = NamedTemporaryFile(delete=False) temp_doc.write(document.encode("utf-8")) temp_doc.close() if not document or len(document) < 10: raise ArchiveError('Readability could not find HTML to parse for article text') cmd = [ DEPENDENCIES['READABILITY_BINARY']['path'], temp_doc.name, ] result = run(cmd, cwd=out_dir, timeout=timeout) try: result_json = json.loads(result.stdout) assert result_json and 'content' in result_json except json.JSONDecodeError: raise ArchiveError('Readability was not able to archive the page', result.stdout + result.stderr) output_folder.mkdir(exist_ok=True) readability_content = result_json.pop("textContent") atomic_write(str(output_folder / "content.html"), result_json.pop("content")) atomic_write(str(output_folder / "content.txt"), readability_content) atomic_write(str(output_folder / "article.json"), result_json) # parse out number of files downloaded from last line of stderr: # "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)" output_tail = [ line.strip() for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:] if line.strip() ] hints = ( 'Got readability response code: {}.'.format(result.returncode), *output_tail, ) # Check for common failure cases if (result.returncode > 0): raise ArchiveError('Readability was not able to archive the page', hints) except (Exception, OSError) as err: status = 'failed' output = err cmd = [cmd[0], './{singlefile,dom}.html'] finally: timer.end() return ArchiveResult( cmd=cmd, pwd=str(out_dir), cmd_version=READABILITY_VERSION, output=output, status=status, index_texts=[readability_content] if readability_content else [], 
**timer.stats, )
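
# --- Illustrative usage sketch (editor addition, not part of the original
# module): in ArchiveBox these hooks are normally invoked by the archiving
# pipeline for every snapshot, but they can be exercised directly. `link`
# stands for an existing index entry (a Link object) obtained elsewhere;
# how it is loaded is outside this module and only assumed here.
#
#   if should_save_readability(link, overwrite=False):
#       result = save_readability(link, timeout=60)
#       print(result.status, result.output)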
mit
4,898,612,251,076,805,000
30.807407
110
0.622729
false
tysonholub/twilio-python
twilio/rest/taskrouter/v1/workspace/workspace_statistics.py
1
10107
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import serialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page class WorkspaceStatisticsList(ListResource): """ """ def __init__(self, version, workspace_sid): """ Initialize the WorkspaceStatisticsList :param Version version: Version that contains the resource :param workspace_sid: The SID of the Workspace :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsList """ super(WorkspaceStatisticsList, self).__init__(version) # Path Solution self._solution = {'workspace_sid': workspace_sid, } def get(self): """ Constructs a WorkspaceStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext """ return WorkspaceStatisticsContext(self._version, workspace_sid=self._solution['workspace_sid'], ) def __call__(self): """ Constructs a WorkspaceStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext """ return WorkspaceStatisticsContext(self._version, workspace_sid=self._solution['workspace_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Taskrouter.V1.WorkspaceStatisticsList>' class WorkspaceStatisticsPage(Page): """ """ def __init__(self, version, response, solution): """ Initialize the WorkspaceStatisticsPage :param Version version: Version that contains the resource :param Response response: Response from the API :param workspace_sid: The SID of the Workspace :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsPage :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsPage """ super(WorkspaceStatisticsPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of WorkspaceStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance """ return WorkspaceStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Taskrouter.V1.WorkspaceStatisticsPage>' class WorkspaceStatisticsContext(InstanceContext): """ """ def __init__(self, version, workspace_sid): """ Initialize the WorkspaceStatisticsContext :param Version version: Version that contains the resource :param workspace_sid: The SID of the Workspace to fetch :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext """ super(WorkspaceStatisticsContext, self).__init__(version) # Path Solution self._solution = {'workspace_sid': 
workspace_sid, } self._uri = '/Workspaces/{workspace_sid}/Statistics'.format(**self._solution) def fetch(self, minutes=values.unset, start_date=values.unset, end_date=values.unset, task_channel=values.unset, split_by_wait_time=values.unset): """ Fetch a WorkspaceStatisticsInstance :param unicode minutes: Only calculate statistics since this many minutes in the past :param datetime start_date: Only calculate statistics from on or after this date :param datetime end_date: Only calculate statistics from this date and time and earlier :param unicode task_channel: Only calculate statistics on this TaskChannel. :param unicode split_by_wait_time: A comma separated list of values that describes the thresholds to calculate statistics on :returns: Fetched WorkspaceStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance """ params = values.of({ 'Minutes': minutes, 'StartDate': serialize.iso8601_datetime(start_date), 'EndDate': serialize.iso8601_datetime(end_date), 'TaskChannel': task_channel, 'SplitByWaitTime': split_by_wait_time, }) payload = self._version.fetch( 'GET', self._uri, params=params, ) return WorkspaceStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Taskrouter.V1.WorkspaceStatisticsContext {}>'.format(context) class WorkspaceStatisticsInstance(InstanceResource): """ """ def __init__(self, version, payload, workspace_sid): """ Initialize the WorkspaceStatisticsInstance :returns: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance """ super(WorkspaceStatisticsInstance, self).__init__(version) # Marshaled Properties self._properties = { 'realtime': payload.get('realtime'), 'cumulative': payload.get('cumulative'), 'account_sid': payload.get('account_sid'), 'workspace_sid': payload.get('workspace_sid'), 'url': payload.get('url'), } # Context self._context = None self._solution = {'workspace_sid': workspace_sid, } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. 
All instance actions are proxied to the context :returns: WorkspaceStatisticsContext for this WorkspaceStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext """ if self._context is None: self._context = WorkspaceStatisticsContext( self._version, workspace_sid=self._solution['workspace_sid'], ) return self._context @property def realtime(self): """ :returns: n object that contains the real-time statistics for the Workspace :rtype: dict """ return self._properties['realtime'] @property def cumulative(self): """ :returns: An object that contains the cumulative statistics for the Workspace :rtype: dict """ return self._properties['cumulative'] @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def workspace_sid(self): """ :returns: The SID of the Workspace :rtype: unicode """ return self._properties['workspace_sid'] @property def url(self): """ :returns: The absolute URL of the Workspace statistics resource :rtype: unicode """ return self._properties['url'] def fetch(self, minutes=values.unset, start_date=values.unset, end_date=values.unset, task_channel=values.unset, split_by_wait_time=values.unset): """ Fetch a WorkspaceStatisticsInstance :param unicode minutes: Only calculate statistics since this many minutes in the past :param datetime start_date: Only calculate statistics from on or after this date :param datetime end_date: Only calculate statistics from this date and time and earlier :param unicode task_channel: Only calculate statistics on this TaskChannel. :param unicode split_by_wait_time: A comma separated list of values that describes the thresholds to calculate statistics on :returns: Fetched WorkspaceStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsInstance """ return self._proxy.fetch( minutes=minutes, start_date=start_date, end_date=end_date, task_channel=task_channel, split_by_wait_time=split_by_wait_time, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Taskrouter.V1.WorkspaceStatisticsInstance {}>'.format(context)
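
# --- Illustrative usage sketch (editor addition, not part of the generated
# file): these statistics are normally fetched through the top-level REST
# client rather than by instantiating this class directly. The SID values and
# credentials below are placeholders.
#
#   from twilio.rest import Client
#
#   client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
#   stats = client.taskrouter \
#                 .workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
#                 .statistics() \
#                 .fetch(minutes=15)
#   print(stats.realtime, stats.cumulative)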
mit
-1,229,389,616,420,329,200
34.588028
132
0.646977
false
awsdocs/aws-doc-sdk-examples
python/example_code/rekognition/rekognition_collections.py
1
13421
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Purpose Shows how to use the AWS SDK for Python (Boto3) with Amazon Rekognition to create a collection that contains faces indexed from a series of images. The collection is then searched for faces that match a reference face. The usage demo in this file uses images in the .media folder. If you run this code without cloning the GitHub repository, you must first download the image files from https://github.com/awsdocs/aws-doc-sdk-examples/tree/master/python/example_code/rekognition/.media """ import logging from pprint import pprint import boto3 from botocore.exceptions import ClientError from rekognition_objects import RekognitionFace from rekognition_image_detection import RekognitionImage logger = logging.getLogger(__name__) class RekognitionCollection: """ Encapsulates an Amazon Rekognition collection. This class is a thin wrapper around parts of the Boto3 Amazon Rekognition API. """ def __init__(self, collection, rekognition_client): """ Initializes a collection object. :param collection: Collection data in the format returned by a call to create_collection. :param rekognition_client: A Boto3 Rekognition client. """ self.collection_id = collection['CollectionId'] self.collection_arn, self.face_count, self.created = self._unpack_collection( collection) self.rekognition_client = rekognition_client @staticmethod def _unpack_collection(collection): """ Unpacks optional parts of a collection that can be returned by describe_collection. :param collection: The collection data. :return: A tuple of the data in the collection. """ return ( collection.get('CollectionArn'), collection.get('FaceCount', 0), collection.get('CreationTimestamp')) def to_dict(self): """ Renders parts of the collection data to a dict. :return: The collection data as a dict. """ rendering = { 'collection_id': self.collection_id, 'collection_arn': self.collection_arn, 'face_count': self.face_count, 'created': self.created } return rendering def describe_collection(self): """ Gets data about the collection from the Amazon Rekognition service. :return: The collection rendered as a dict. """ try: response = self.rekognition_client.describe_collection( CollectionId=self.collection_id) # Work around capitalization of Arn vs. ARN response['CollectionArn'] = response.get('CollectionARN') (self.collection_arn, self.face_count, self.created) = self._unpack_collection(response) logger.info("Got data for collection %s.", self.collection_id) except ClientError: logger.exception("Couldn't get data for collection %s.", self.collection_id) raise else: return self.to_dict() def delete_collection(self): """ Deletes the collection. """ try: self.rekognition_client.delete_collection(CollectionId=self.collection_id) logger.info("Deleted collection %s.", self.collection_id) self.collection_id = None except ClientError: logger.exception("Couldn't delete collection %s.", self.collection_id) raise def index_faces(self, image, max_faces): """ Finds faces in the specified image, indexes them, and stores them in the collection. :param image: The image to index. :param max_faces: The maximum number of faces to index. :return: A tuple. The first element is a list of indexed faces. The second element is a list of faces that couldn't be indexed. 
""" try: response = self.rekognition_client.index_faces( CollectionId=self.collection_id, Image=image.image, ExternalImageId=image.image_name, MaxFaces=max_faces, DetectionAttributes=['ALL']) indexed_faces = [ RekognitionFace({**face['Face'], **face['FaceDetail']}) for face in response['FaceRecords']] unindexed_faces = [ RekognitionFace(face['FaceDetail']) for face in response['UnindexedFaces']] logger.info( "Indexed %s faces in %s. Could not index %s faces.", len(indexed_faces), image.image_name, len(unindexed_faces)) except ClientError: logger.exception("Couldn't index faces in image %s.", image.image_name) raise else: return indexed_faces, unindexed_faces def list_faces(self, max_results): """ Lists the faces currently indexed in the collection. :param max_results: The maximum number of faces to return. :return: The list of faces in the collection. """ try: response = self.rekognition_client.list_faces( CollectionId=self.collection_id, MaxResults=max_results) faces = [RekognitionFace(face) for face in response['Faces']] logger.info( "Found %s faces in collection %s.", len(faces), self.collection_id) except ClientError: logger.exception( "Couldn't list faces in collection %s.", self.collection_id) raise else: return faces def search_faces_by_image(self, image, threshold, max_faces): """ Searches for faces in the collection that match the largest face in the reference image. :param image: The image that contains the reference face to search for. :param threshold: The match confidence must be greater than this value for a face to be included in the results. :param max_faces: The maximum number of faces to return. :return: A tuple. The first element is the face found in the reference image. The second element is the list of matching faces found in the collection. """ try: response = self.rekognition_client.search_faces_by_image( CollectionId=self.collection_id, Image=image.image, FaceMatchThreshold=threshold, MaxFaces=max_faces) image_face = RekognitionFace({ 'BoundingBox': response['SearchedFaceBoundingBox'], 'Confidence': response['SearchedFaceConfidence'] }) collection_faces = [ RekognitionFace(face['Face']) for face in response['FaceMatches']] logger.info("Found %s faces in the collection that match the largest " "face in %s.", len(collection_faces), image.image_name) except ClientError: logger.exception( "Couldn't search for faces in %s that match %s.", self.collection_id, image.image_name) raise else: return image_face, collection_faces def search_faces(self, face_id, threshold, max_faces): """ Searches for faces in the collection that match another face from the collection. :param face_id: The ID of the face in the collection to search for. :param threshold: The match confidence must be greater than this value for a face to be included in the results. :param max_faces: The maximum number of faces to return. :return: The list of matching faces found in the collection. This list does not contain the face specified by `face_id`. """ try: response = self.rekognition_client.search_faces( CollectionId=self.collection_id, FaceId=face_id, FaceMatchThreshold=threshold, MaxFaces=max_faces) faces = [RekognitionFace(face['Face']) for face in response['FaceMatches']] logger.info( "Found %s faces in %s that match %s.", len(faces), self.collection_id, face_id) except ClientError: logger.exception( "Couldn't search for faces in %s that match %s.", self.collection_id, face_id) raise else: return faces def delete_faces(self, face_ids): """ Deletes faces from the collection. 
:param face_ids: The list of IDs of faces to delete. :return: The list of IDs of faces that were deleted. """ try: response = self.rekognition_client.delete_faces( CollectionId=self.collection_id, FaceIds=face_ids) deleted_ids = response['DeletedFaces'] logger.info( "Deleted %s faces from %s.", len(deleted_ids), self.collection_id) except ClientError: logger.exception("Couldn't delete faces from %s.", self.collection_id) raise else: return deleted_ids class RekognitionCollectionManager: """ Encapsulates Amazon Rekognition collection management functions. This class is a thin wrapper around parts of the Boto3 Amazon Rekognition API. """ def __init__(self, rekognition_client): """ Initializes the collection manager object. :param rekognition_client: A Boto3 Rekognition client. """ self.rekognition_client = rekognition_client def create_collection(self, collection_id): """ Creates an empty collection. :param collection_id: Text that identifies the collection. :return: The newly created collection. """ try: response = self.rekognition_client.create_collection( CollectionId=collection_id) response['CollectionId'] = collection_id collection = RekognitionCollection(response, self.rekognition_client) logger.info("Created collection %s.", collection_id) except ClientError: logger.exception("Couldn't create collection %s.", collection_id) raise else: return collection def list_collections(self, max_results): """ Lists collections for the current account. :param max_results: The maximum number of collections to return. :return: The list of collections for the current account. """ try: response = self.rekognition_client.list_collections(MaxResults=max_results) collections = [ RekognitionCollection({'CollectionId': col_id}, self.rekognition_client) for col_id in response['CollectionIds']] except ClientError: logger.exception("Couldn't list collections.") raise else: return collections def usage_demo(): print('-'*88) print("Welcome to the Amazon Rekognition face collection demo!") print('-'*88) logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') rekognition_client = boto3.client('rekognition') images = [ RekognitionImage.from_file( '.media/pexels-agung-pandit-wiguna-1128316.jpg', rekognition_client, image_name='sitting'), RekognitionImage.from_file( '.media/pexels-agung-pandit-wiguna-1128317.jpg', rekognition_client, image_name='hopping'), RekognitionImage.from_file( '.media/pexels-agung-pandit-wiguna-1128318.jpg', rekognition_client, image_name='biking')] collection_mgr = RekognitionCollectionManager(rekognition_client) collection = collection_mgr.create_collection('doc-example-collection-demo') print(f"Created collection {collection.collection_id}:") pprint(collection.describe_collection()) print("Indexing faces from three images:") for image in images: collection.index_faces(image, 10) print("Listing faces in collection:") faces = collection.list_faces(10) for face in faces: pprint(face.to_dict()) input("Press Enter to continue.") print(f"Searching for faces in the collection that match the first face in the " f"list (Face ID: {faces[0].face_id}.") found_faces = collection.search_faces(faces[0].face_id, 80, 10) print(f"Found {len(found_faces)} matching faces.") for face in found_faces: pprint(face.to_dict()) input("Press Enter to continue.") print(f"Searching for faces in the collection that match the largest face in " f"{images[0].image_name}.") image_face, match_faces = collection.search_faces_by_image(images[0], 80, 10) print(f"The largest face in {images[0].image_name} 
is:") pprint(image_face.to_dict()) print(f"Found {len(match_faces)} matching faces.") for face in match_faces: pprint(face.to_dict()) input("Press Enter to continue.") collection.delete_collection() print('Thanks for watching!') print('-'*88) if __name__ == '__main__': usage_demo()
apache-2.0
4,713,168,682,652,689,000
38.12828
102
0.617167
false
atarax82/lotto-project
project/monitor.py
1
2948
import os import sys #import time import signal import threading import atexit import queue _interval = 1.0 _times = {} _files = [] _running = False _queue = queue.Queue() _lock = threading.Lock() def _restart(path): _queue.put(True) prefix = 'monitor (pid=%d):' % os.getpid() print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr) print('%s Triggering process restart.' % prefix, file=sys.stderr) os.kill(os.getpid(), signal.SIGINT) def _modified(path): try: # If path doesn't denote a file and were previously # tracking it, then it has been removed or the file type # has changed so force a restart. If not previously # tracking the file then we can ignore it as probably # pseudo reference such as when file extracted from a # collection of modules contained in a zip file. if not os.path.isfile(path): return path in _times # Check for when file last modified. mtime = os.stat(path).st_mtime if path not in _times: _times[path] = mtime # Force restart when modification time has changed, even # if time now older, as that could indicate older file # has been restored. if mtime != _times[path]: return True except: # If any exception occured, likely that file has been # been removed just before stat(), so force a restart. return True return False def _monitor(): while 1: # Check modification times on all files in sys.modules. for module in list(sys.modules.values()): if not hasattr(module, '__file__'): continue path = getattr(module, '__file__') if not path: continue if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']: path = path[:-1] if _modified(path): return _restart(path) # Check modification times on files which have # specifically been registered for monitoring. for path in _files: if _modified(path): return _restart(path) # Go to sleep for specified interval. try: return _queue.get(timeout=_interval) except: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except: pass _thread.join() atexit.register(_exiting) def track(path): if not path in _files: _files.append(path) def start(interval=1.0): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: prefix = 'monitor (pid=%d):' % os.getpid() print('%s Starting change monitor.' % prefix, file=sys.stderr) _running = True _thread.start() _lock.release()
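
# --- Illustrative usage sketch (editor addition, not part of the original
# module): the module is meant to be imported once at application start-up,
# e.g. from a WSGI entry point. The settings path below is a hypothetical
# extra file to watch on top of everything already tracked via sys.modules.
#
#   import monitor
#
#   monitor.start(interval=1.0)              # begin polling for changes
#   monitor.track('/path/to/settings.ini')   # watch a non-module file too
#
# When a tracked file changes, _restart() sends SIGINT to the current process
# so the hosting server can relaunch it with the fresh code.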
gpl-3.0
-7,386,364,848,896,357,000
25.097345
76
0.585142
false
ESSolutions/ESSArch_Core
ESSArch_Core/agents/documents.py
1
1455
from elasticsearch_dsl import Date, InnerDoc, Keyword, Nested, Text

from ESSArch_Core.agents.models import Agent
from ESSArch_Core.search.documents import DocumentBase
from ESSArch_Core.tags.documents import autocomplete_analyzer


class AgentNameDocument(InnerDoc):
    main = Text()
    part = Text()
    description = Text()
    start_date = Date()
    end_date = Date()

    @classmethod
    def from_obj(cls, obj):
        doc = AgentNameDocument(
            main=obj.main,
            part=obj.part,
            description=obj.description,
            start_date=obj.start_date,
            end_date=obj.end_date,
        )
        return doc


class AgentDocument(DocumentBase):
    id = Keyword()
    task_id = Keyword()
    names = Nested(AgentNameDocument)
    start_date = Date()
    end_date = Date()

    @classmethod
    def get_model(cls):
        return Agent

    @classmethod
    def from_obj(cls, obj):
        if obj.task is None:
            task_id = None
        else:
            task_id = str(obj.task.pk)
        doc = AgentDocument(
            _id=str(obj.pk),
            id=str(obj.pk),
            task_id=task_id,
            names=[
                AgentNameDocument.from_obj(name) for name in obj.names.iterator()
            ],
            start_date=obj.start_date,
            end_date=obj.end_date,
        )
        return doc

    class Index:
        name = 'agent'
        analyzers = [autocomplete_analyzer]
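
# A brief indexing sketch, assuming Django is configured so Agent.objects is
# usable, that an Elasticsearch connection is registered via
# elasticsearch_dsl.connections, and that DocumentBase ultimately behaves like
# an elasticsearch_dsl Document (i.e. instances expose save()). The host below
# is a placeholder.
def _index_one_agent(agent_pk):
    from elasticsearch_dsl.connections import connections

    connections.create_connection(hosts=['localhost'])
    agent = Agent.objects.get(pk=agent_pk)
    doc = AgentDocument.from_obj(agent)
    doc.save()
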
gpl-3.0
3,981,038,037,303,409,700
23.661017
81
0.57457
false
MasterGowen/moonrain
moonrain/projects/views.py
1
3432
import json from django.shortcuts import render, redirect from django.views.generic.edit import UpdateView, DeleteView from .models import Project from ..videos.models import Video, VideosSequence from ..videos.views import new_sequence, get_sequence from django.contrib.auth.decorators import login_required from django.http import Http404, HttpResponse from django.core.exceptions import ObjectDoesNotExist from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.core.context_processors import csrf from .forms import ProjectForm def projects_list_all(request): projects = [] for project in Project.objects.all(): if project.permission == 'public': projects.append(project) @login_required def for_users(request, project, projects): if project.permission == 'for_users': projects.append(project) for_users(request, project, projects) @login_required def for_staff(request, project, projects): if project.permission == 'for_staff': if request.user == project.author or str(request.user) in str(project.users()): projects.append(project) for_staff(request, project, projects) projects = list(reversed(projects)) paginator = Paginator(projects, 10) page = request.GET.get('page') try: projects = paginator.page(page) except PageNotAnInteger: projects = paginator.page(1) except EmptyPage: projects = paginator.page(paginator.num_pages) return render(request, 'projects/index.html', {'projects': projects, 'pages': range(1, (paginator.num_pages + 1))}) def detail(request, project_id): try: project = Project.objects.get(id=project_id) jsonSequence = json.loads(get_sequence(request, project)) except ObjectDoesNotExist: raise Http404 videos_ids = jsonSequence['sequence'].split(',') videos = [] for video_id in videos_ids: if video_id != 'None': video = Video.objects.get(id=video_id) videos.append(video) if project.permission == 'public': return render(request, 'projects/project.html', {'project': project, 'videos': videos}) elif project.permission == 'for_users' \ and request.user: return render(request, 'projects/project.html', {'project': project, 'videos': videos}) elif project.permission == 'for_staff' \ and request.user == project.author \ or str(request.user) \ in str(project.users()): return render(request, 'projects/project.html', {'project': project, 'videos': videos}) else: return HttpResponse(status=403) def new_project(request): if request.method == 'POST': form = ProjectForm(request.POST) if form.is_valid(): project = form.save(commit=False) project.author_id = request.user.id project.save() new_sequence(request, project.id) return redirect(project) args = {} args.update(csrf(request)) args['form'] = ProjectForm() return render(request, 'projects/new.html', args) class ProjectDelete(DeleteView): model = Project fields = [] success_url = '/projects/' class ProjectUpdate(UpdateView): model = Project fields = ['name', 'comments', 'tags', 'permission']
gpl-2.0
8,194,564,417,942,022,000
32.656863
119
0.648893
false
harisbal/pandas
pandas/tests/arrays/categorical/test_missing.py
1
3078
# -*- coding: utf-8 -*- import collections import numpy as np import pytest from pandas.compat import lrange from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import Categorical, Index, isna import pandas.util.testing as tm class TestCategoricalMissing(object): def test_na_flags_int_categories(self): # #1457 categories = lrange(10) labels = np.random.randint(0, 10, 20) labels[::5] = -1 cat = Categorical(labels, categories, fastpath=True) repr(cat) tm.assert_numpy_array_equal(isna(cat), labels == -1) def test_nan_handling(self): # Nans are represented as -1 in codes c = Categorical(["a", "b", np.nan, "a"]) tm.assert_index_equal(c.categories, Index(["a", "b"])) tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) c[1] = np.nan tm.assert_index_equal(c.categories, Index(["a", "b"])) tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8)) # Adding nan to categories should make assigned nan point to the # category! c = Categorical(["a", "b", np.nan, "a"]) tm.assert_index_equal(c.categories, Index(["a", "b"])) tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) def test_set_dtype_nans(self): c = Categorical(['a', 'b', np.nan]) result = c._set_dtype(CategoricalDtype(['a', 'c'])) tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype='int8')) def test_set_item_nan(self): cat = Categorical([1, 2, 3]) cat[1] = np.nan exp = Categorical([1, np.nan, 3], categories=[1, 2, 3]) tm.assert_categorical_equal(cat, exp) @pytest.mark.parametrize('fillna_kwargs, msg', [ (dict(value=1, method='ffill'), "Cannot specify both 'value' and 'method'."), (dict(), "Must specify a fill 'value' or 'method'."), (dict(method='bad'), "Invalid fill method. Expecting .* bad"), ]) def test_fillna_raises(self, fillna_kwargs, msg): # https://github.com/pandas-dev/pandas/issues/19682 cat = Categorical([1, 2, 3]) with tm.assert_raises_regex(ValueError, msg): cat.fillna(**fillna_kwargs) @pytest.mark.parametrize("named", [True, False]) def test_fillna_iterable_category(self, named): # https://github.com/pandas-dev/pandas/issues/21097 if named: Point = collections.namedtuple("Point", "x y") else: Point = lambda *args: args # tuple cat = Categorical([Point(0, 0), Point(0, 1), None]) result = cat.fillna(Point(0, 0)) expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)]) tm.assert_categorical_equal(result, expected)
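
# A tiny standalone illustration of the NaN convention the tests above rely
# on: a Categorical never stores NaN as a category, it encodes missing values
# with code -1. The printed values are indicative and assume the pandas
# behaviour exercised by this test module.
def example_nan_codes():
    import pandas as pd

    cat = pd.Categorical(["a", "b", np.nan, "a"])
    print(cat.categories)   # Index(['a', 'b'], dtype='object') -- no NaN category
    print(cat.codes)        # [ 0  1 -1  0] -- the missing value is code -1
    print(cat.fillna("a"))  # the NaN slot is filled with an existing category
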
bsd-3-clause
-2,132,304,201,633,114,000
34.37931
73
0.547109
false
tbetcke/PyBEM2D
examples/circscatt.py
1
1272
import pybem2d.core.bases as pcb
import pybem2d.core.segments as pcs
import pybem2d.core.quadrules as pcq
import pybem2d.core.kernels as pck
import pybem2d.core.mesh as pcm
import pybem2d.core.assembly as pca
import pybem2d.core.evaluation as pce
import pybem2d.core.visualization as pcv
import numpy as np

k=10
nelems=50
dirs=np.array([1.0,0])

# Define the mesh
circle=pcs.Arc(3,0,0,2*np.pi,1.0)
d=pcm.Domain([circle])
mesh=pcm.Mesh([d])
mesh.discretize(nelems)

quadrule=pcq.GaussQuadrature() # A standard Gauss Quadrature with default parameters
mToB=pcb.Legendre.legendreBasis(mesh,2) # A basis of Legendre polynomials of degree 2
kernel=pck.AcousticCombined(k,k) # The combined potential layer
singleLayer=pck.AcousticSingleLayer(k)
assembly=pca.Assembly(mToB,quadrule)

rhsfun=lambda t,x,n: 2j*k*np.exp(1j*k*(dirs[0]*x[0]+dirs[1]*x[1]))*(dirs[0]*n[0]+dirs[1]*n[1]-1)
rhs=assembly.projFun([rhsfun])
mKernel=assembly.getKernel(kernel)
mIdentity=assembly.getIdentity()
op=mIdentity+2*mKernel
print op.shape
coeffs=np.linalg.solve(op,rhs)

#ev=pce.Evaluator(mToB,singleLayer,quadrule)
#v=pcv.Visualizer(ev,[-3,5,-3,3],200,200,incWave=lambda x: np.exp(1j*k*(x[0]*dirs[0]+x[1]*dirs[1])))
#v.fullField(-coeffs[:,0])
x,f=pce.evalDensity(mToB,coeffs[:,0])
mit
7,395,326,172,782,290,000
22.127273
100
0.750786
false
sertansenturk/tomato
tests/test_converter.py
1
1334
import numpy as np import pytest from tomato.converter import Converter def test_hz_to_cent_negative_hz_track(): # GIVEN hz_track = np.array([-50]) ref_freq = np.float(25.0) # WHEN; THEN with pytest.raises(ValueError): Converter.hz_to_cent(hz_track, ref_freq) def test_hz_to_cent_negative_ref_freq(): # GIVEN hz_track = np.array([50]) ref_freq = np.float(-5.0) # WHEN; THEN with pytest.raises(ValueError): Converter.hz_to_cent(hz_track, ref_freq) def test_hz_to_cent_negative_min_freq(): # GIVEN hz_track = np.array([50]) ref_freq = np.float(25.0) min_freq = -5.0 # WHEN; THEN with pytest.raises(ValueError): Converter.hz_to_cent(hz_track, ref_freq, min_freq) def test_hz_to_cent_ref_less_than_min(): # GIVEN hz_track = np.array([50]) ref_freq = np.float(25.0) min_freq = np.float(30.0) # WHEN; THEN with pytest.raises(ValueError): Converter.hz_to_cent(hz_track, ref_freq, min_freq) def test_hz_to_cent_hz_less_than_min(): # GIVEN hz_track = np.array([20]) ref_freq = np.float(35.0) min_freq = np.float(30.0) # WHEN result = Converter.hz_to_cent(hz_track, ref_freq, min_freq) # THEN expected = np.array([np.nan]) np.testing.assert_array_equal(result, expected)
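
# A small happy-path sketch to complement the failure cases above. It assumes
# hz_to_cent implements the usual cent definition (1200 * log2(hz / ref_freq))
# and, as the last test implies, maps values below min_freq to NaN.
def example_hz_to_cent_usage():
    hz_track = np.array([220.0, 440.0, 10.0])
    cents = Converter.hz_to_cent(hz_track, np.float(220.0), np.float(20.0))
    # 220 Hz -> 0 cents, 440 Hz -> roughly 1200 cents (one octave), 10 Hz -> NaN
    print(cents)
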
agpl-3.0
-5,201,118,722,165,138,000
21.233333
63
0.618441
false
mjafin/bcbio-nextgen
bcbio/variation/bedutils.py
1
7691
"""Utilities for manipulating BED files. """ import os import shutil import sys import subprocess import toolz as tz from bcbio import utils from bcbio.bam import ref from bcbio.distributed.transaction import file_transaction from bcbio.pipeline import config_utils from bcbio.pipeline import datadict as dd from bcbio.provenance import do from bcbio.variation import vcfutils def get_sort_cmd(): """Retrieve GNU coreutils sort command, using version-sort if available. Recent versions of sort have alpha-numeric sorting, which provides more natural sorting of chromosomes (chr1, chr2) instead of (chr1, chr10). This also fixes versions of sort, like 8.22 in CentOS 7.1, that have broken sorting without version sorting specified. https://github.com/chapmanb/bcbio-nextgen/issues/624 https://github.com/chapmanb/bcbio-nextgen/issues/1017 """ has_versionsort = subprocess.check_output("sort --help | grep version-sort; exit 0", shell=True).strip() if has_versionsort: return "sort -V" else: return "sort" def check_bed_contigs(in_file, data): """Ensure BED file contigs match the reference genome. """ contigs = set([]) with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: if not line.startswith(("#", "track", "browser")) and line.strip(): contigs.add(line.split()[0]) ref_contigs = set([x.name for x in ref.file_contigs(dd.get_ref_file(data))]) if len(contigs - ref_contigs) / float(len(contigs)) > 0.25: raise ValueError("Contigs in BED file %s not in reference genome:\n %s\n" % (in_file, list(contigs - ref_contigs)) + "This is typically due to chr1 versus 1 differences in BED file and reference.") def clean_file(in_file, data, prefix="", bedprep_dir=None): """Prepare a clean sorted input BED file without headers """ if in_file: if not bedprep_dir: bedprep_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "bedprep")) out_file = os.path.join(bedprep_dir, "%s%s" % (prefix, os.path.basename(in_file))).replace(".gz", "") if not utils.file_uptodate(out_file, in_file): check_bed_contigs(in_file, data) with file_transaction(data, out_file) as tx_out_file: py_cl = os.path.join(os.path.dirname(sys.executable), "py") cat_cmd = "zcat" if in_file.endswith(".gz") else "cat" sort_cmd = get_sort_cmd() cmd = ("{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | " "grep -v ^# | " "{py_cl} -x 'bcbio.variation.bedutils.remove_bad(x)' | " "{sort_cmd} -k1,1 -k2,2n > {tx_out_file}") do.run(cmd.format(**locals()), "Prepare cleaned BED file", data) vcfutils.bgzip_and_index(out_file, data.get("config", {}), remove_orig=False) return out_file def sort_merge(in_file, data): """Sort and merge a BED file, collapsing gene names. """ out_file = "%s-sort.bed" % os.path.splitext(in_file)[0] if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: cat_cmd = "zcat" if in_file.endswith(".gz") else "cat" sort_cmd = get_sort_cmd() cmd = ("{cat_cmd} {in_file} | {sort_cmd} -k1,1 -k2,2n | " "bedtools merge -i - -c 4 -o distinct > {tx_out_file}") do.run(cmd.format(**locals()), "Sort BED file", data) return out_file def remove_bad(line): """Remove non-increasing BED lines which will cause variant callers to choke. """ parts = line.strip().split("\t") if line.strip() and len(parts) > 2 and int(parts[2]) > int(parts[1]): return line else: return None def merge_overlaps(in_file, data, distance=None, out_dir=None): """Merge bed file intervals to avoid overlapping regions. 
Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes that don't collapse BEDs prior to using them. """ config = data["config"] if in_file: bedtools = config_utils.get_program("bedtools", config, default="bedtools") work_dir = tz.get_in(["dirs", "work"], data) if out_dir: bedprep_dir = out_dir elif work_dir: bedprep_dir = utils.safe_makedir(os.path.join(work_dir, "bedprep")) else: bedprep_dir = os.path.dirname(in_file) out_file = os.path.join(bedprep_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(in_file))[0])) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: distance = "-d %s" % distance if distance else "" cmd = "{bedtools} merge {distance} -i {in_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Prepare merged BED file", data) vcfutils.bgzip_and_index(out_file, data["config"], remove_orig=False) return out_file def population_variant_regions(items): """Retrieve the variant region BED file from a population of items. If tumor/normal, return the tumor BED file. If a population, return the BED file covering the most bases. """ import pybedtools if len(items) == 1: return dd.get_variant_regions(items[0]) else: paired = vcfutils.get_paired(items) if paired: return dd.get_variant_regions(paired.tumor_data) else: vrs = [] for data in items: vr_bed = dd.get_variant_regions(data) if vr_bed: vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed)) vrs.sort(reverse=True) if vrs: return vrs[0][1] def clean_inputs(data): """Clean BED input files to avoid overlapping segments that cause downstream issues. Per-merges inputs to avoid needing to call multiple times during later parallel steps. """ clean_vr = clean_file(utils.get_in(data, ("config", "algorithm", "variant_regions")), data) merged_vr = merge_overlaps(clean_vr, data) data["config"]["algorithm"]["variant_regions"] = clean_vr data["config"]["algorithm"]["variant_regions_merged"] = merged_vr return data def combine(in_files, out_file, config): """Combine multiple BED files into a single output. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: with open(tx_out_file, "w") as out_handle: for in_file in in_files: with open(in_file) as in_handle: shutil.copyfileobj(in_handle, out_handle) return out_file def intersect_two(f1, f2, work_dir, data): """Intersect two regions, handling cases where either file is not present. """ f1_exists = f1 and utils.file_exists(f1) f2_exists = f2 and utils.file_exists(f2) if not f1_exists and not f2_exists: return None elif f1_exists and not f2_exists: return f1 elif f2_exists and not f1_exists: return f2 else: out_file = os.path.join(work_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(f1))[0])) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = "bedtools intersect -a {f1} -b {f2} > {tx_out_file}" do.run(cmd.format(**locals()), "Intersect BED files", data) return out_file
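
# A minimal sketch of driving the standalone helpers above outside a full
# bcbio run. It assumes bedtools, bgzip and tabix are on PATH and that the
# sparse data dictionary below (only the keys these functions read) is
# sufficient; all file paths are hypothetical placeholders.
def _merge_regions_example():
    data = {"config": {}, "dirs": {"work": "/tmp/bedprep-example"}}
    work_dir = utils.safe_makedir(data["dirs"]["work"])
    merged = merge_overlaps("/tmp/input-regions.bed", data,
                            distance=10, out_dir=work_dir)
    # Restrict the merged regions to a second BED of callable regions.
    return intersect_two(merged, "/tmp/callable-regions.bed", work_dir, data)
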
mit
-970,185,428,665,828,700
41.727778
115
0.606683
false
openqt/algorithms
leetcode/python/lc331-verify-preorder-serialization-of-a-binary-tree.py
1
1578
# coding=utf-8
import unittest

"""331. Verify Preorder Serialization of a Binary Tree

https://leetcode.com/problems/verify-preorder-serialization-of-a-binary-tree/description/

One way to serialize a binary tree is to use pre-order traversal. When we
encounter a non-null node, we record the node's value. If it is a null node,
we record using a sentinel value such as `#`.

         _9_
        /   \
       3     2
      / \   / \
     4   1  #  6
    / \ / \   / \
    # # # #   # #

For example, the above binary tree can be serialized to the string
`"9,3,4,#,#,1,#,#,2,#,6,#,#"`, where `#` represents a null node.

Given a string of comma separated values, verify whether it is a correct
preorder traversal serialization of a binary tree. Find an algorithm without
reconstructing the tree.

Each comma separated value in the string must be either an integer or a
character `'#'` representing `null` pointer.

You may assume that the input format is always valid, for example it could
never contain two consecutive commas such as `"1,,3"`.

**Example 1:**

    **Input:**"9,3,4,#,#,1,#,#,2,#,6,#,#"
    **Output:**true

**Example 2:**

    **Input:**"1,#"
    **Output:**false

**Example 3:**

    **Input:**"9,#,#,1"
    **Output:**false

Similar Questions:
"""


class Solution(object):
    def isValidSerialization(self, preorder):
        """
        :type preorder: str
        :rtype: bool
        """
        # Count open "slots" for nodes: the virtual root provides one slot,
        # every non-null node consumes one slot and opens two, and every '#'
        # only consumes one. The serialization is valid iff the count never
        # drops to zero before the last token and ends at exactly zero.
        slots = 1
        for node in preorder.split(','):
            if slots <= 0:
                return False
            if node == '#':
                slots -= 1
            else:
                slots += 1
        return slots == 0

    def test(self):
        pass


if __name__ == "__main__":
    unittest.main()
gpl-3.0
4,291,146,712,371,423,000
19.486842
89
0.586819
false
JeroenZegers/Nabu-MSSS
nabu/neuralnetworks/loss_computers/ms_loss.py
1
2597
"""@file ms_loss.py contains the MsLoss. Temporary naming of file and class""" import loss_computer import tensorflow as tf class MsLoss(loss_computer.LossComputer): """A loss computer that calculates the loss""" def __call__(self, targets, logits, seq_length): # target is actually only required for it's shape to derive the number of active speakers multi_targets = targets['multi_targets'] nr_act_spk = multi_targets.get_shape()[-1] # seq_length = seq_length['bin_est'] logits = logits['act_logit'] logits = tf.squeeze(logits, axis=-1) nr_spk = logits.get_shape()[1] batch_size = logits.get_shape()[0] if self.lossconf['activation'] == 'sigmoid': logits = tf.sigmoid(logits) else: raise BaseException('Other activations not yet implemented') if len(logits.get_shape()) != 3: raise BaseException('Hardcoded some stuff for 3 dimensions') second_dim = logits.get_shape()[1] seq_length = seq_length['features'] # have to do this better max_len = tf.shape(logits)[-1] tmp = [] for utt_ind in range(batch_size): tmp.append( tf.expand_dims( tf.concat( [tf.ones([second_dim, seq_length[utt_ind]]), tf.zeros([second_dim, max_len - seq_length[utt_ind]])], -1), 0)) # seq_length_mask[utt_ind, :seq_length[utt_ind]] = 1 seq_length_mask = tf.concat(tmp, 0) logits = logits * seq_length_mask if self.lossconf['av_time'] == 'True': logits = tf.reduce_sum(logits, 2) logits = tf.divide(logits, tf.expand_dims(tf.to_float(seq_length), -1)) targets = tf.concat([tf.ones([batch_size, nr_act_spk]), tf.zeros([batch_size, nr_spk-nr_act_spk])], -1) loss = tf.reduce_sum(tf.square(logits - targets)) norm = tf.to_float(batch_size * nr_spk) return loss, norm def oldcall(self, targets, logits, seq_length): # target is actually only required for it's shape to derive the number of active speakers multi_targets = targets['multi_targets'] nr_act_spk = multi_targets.get_shape()[-1] # seq_length = seq_length['bin_est'] logits = logits['act_logit'] logits = tf.squeeze(logits, axis=-1) nr_spk = logits.get_shape()[1] batch_size = logits.get_shape()[0] if self.lossconf['activation'] == 'sigmoid': logits = tf.sigmoid(logits) else: raise BaseException('Other activations not yet implemented') if self.lossconf['av_time'] == 'True': logits = tf.reduce_mean(logits, 2) targets = tf.concat([tf.ones([batch_size, nr_act_spk]), tf.zeros([batch_size, nr_spk-nr_act_spk])], -1) loss = tf.reduce_sum(tf.square(logits - targets)) norm = tf.to_float(batch_size * nr_spk) return loss, norm
mit
8,943,644,888,906,579,000
32.727273
115
0.672699
false
ikosenn/cray-cray
fummy.py
1
2626
""" Author: ikosenn This is a program to eliminate stale git branches. It checks last commits and based on the staleness threshold eliminates all stale branches Another CL function is provided to eliminate all available branches. You can also remove all branches that have already been merged to the main branch """ import os from datetime import datetime import click from sarge import capture_stdout import pytz from dateutil.parser import parse DEFAULT_BRANCH = 'master' # helper functions def get_time_difference(time): """ Computes the difference with todays time """ timezone = "Africa/Nairobi" branch_time = parse(time) current_time = datetime.now(pytz.timezone(timezone)) diff_days = (current_time - branch_time) return diff_days.days def cwd(path): os.chdir(path) @click.command() @click.option( '--threshold', '-t', default=10, prompt='What number of days should the threshold be? [10 days]') @click.option( 'branches', '--branch', '-b', default=DEFAULT_BRANCH, prompt='What branches should be excluded? [master]', multiple=True) @click.option( '--path', '-p', prompt='File path to the git repo?', type=click.Path(exists=True)) def fummy(threshold, branches, path): cwd(path) all_branches = capture_stdout('git branch') # remove spaces and any blank spaces temp = all_branches.stdout.text.replace( '*', '').replace(' ', '').split('\n') for branch in temp: if branch and branch not in branches: click.echo('Processing branch: {}'.format(branch)) p = capture_stdout( 'git show {} --format="%cI" --no-patch'.format(branch)) diff_days = get_time_difference(p.stdout.text) if diff_days > threshold: click.echo('Deleting {}'.format(branch)) p = capture_stdout( 'git branch -D {}'.format(branch)) click.echo(p.stdout.text) @click.command() @click.option('--filename', type=click.Path(exists=True)) @click.option('--default', '-d', default=DEFAULT_BRANCH) def kill_merged(default): """ Start by checking out to the master branch and then finding out the branches already merged to master and eliminating the buggage """ # git branch --merged master pass @click.group() def cli(): """ Command Line Interface tools loader for ``fummy`` These utilities help with deleting git branches older than the specified period """ pass cli.add_command(fummy) if __name__ == '__main__': cli()
mit
-3,121,794,362,118,456,300
25
76
0.639756
false
jairglez/intel-iot-refkit
meta-iotqa/lib/oeqa/runtime/connectivity/bluetooth/bluetooth.py
1
11137
import time import os import string from oeqa.utils.helper import shell_cmd_timeout class BTFunction(object): """ @class BTFunction """ log = "" def __init__(self, target): self.target = target # un-block software rfkill lock self.target.run('rfkill unblock all') self.target.run('killall gatttool') self.target.run('killall hcitool') def target_collect_info(self, cmd): """ @fn target_collect_info @param self @param cmd @return """ (status, output) = self.target.run(cmd) self.log = self.log + "\n\n[Debug] Command output --- %s: \n" % cmd self.log = self.log + output def target_hciconfig_init(self): ''' init target bluetooth by hciconfig commands @fn target_hciconfig_init @param self @return ''' (status, output) = self.target.run('hciconfig hci0 reset') assert status == 0, "reset hci0 fails, please check if your BT device exists" time.sleep(1) self.target.run('hciconfig hci0 up') self.target.run('hciconfig hci0 piscan') self.target.run('hciconfig hci0 noleadv') time.sleep(1) def set_leadv(self): ''' Get hci0 MAC address @fn get_bt_mac @param self @return ''' (status, output) = self.target.run('hciconfig hci0 leadv') time.sleep(2) assert status == 0, "Set leadv fail: %s" % (output) def get_bt_mac(self): ''' Get hci0 MAC address @fn get_bt_mac @param self @return ''' (status, output) = self.target.run('hciconfig hci0 | grep "BD Address"') return output.split()[2] def get_bt0_ip(self): ''' Get bt0 (ipv6) address @fn get_bt0_ip @param self @return ''' self.target_collect_info('ifconfig') (status, output) = self.target.run('ifconfig bt0 | grep "inet6 addr"') assert status == 0, "Get bt0 address failure: %s\n%s" % (output, self.log) return output.split('%')[0].split()[2] def get_name(self): ''' Get bt0 device name by bluetoothctl @fn get_name @param self @return ''' exp = os.path.join(os.path.dirname(__file__), "files/bt_get_name.exp") btmac = self.get_bt_mac() cmd = 'expect %s %s %s' % (exp, self.target.ip, btmac) (status, output) = shell_cmd_timeout(cmd) if type(output) is bytes: output = output.decode("ascii") assert status == 0, "Get hci0 name fails: %s" % output for line in output.splitlines(): if type(line) is bytes: line = line.decode('ascii') if "Controller %s" % btmac in line: return line.split()[3] return "" def enable_bluetooth(self): ''' enable bluetooth after testing @fn enable_bluetooth @param self @return ''' # Enable Bluetooth (status, output) = self.target.run('connmanctl enable bluetooth') assert status == 0, "Error messages: %s" % output time.sleep(1) def disable_bluetooth(self): ''' disable bluetooth after testing @fn disable_bluetooth @param self @return ''' (status, output) = self.target.run('connmanctl disable bluetooth') assert status == 0, "Error messages: %s" % output # sleep some seconds to ensure disable is done time.sleep(1) def ctl_power_on(self): '''bluetoothctl power on bluetooth device @fn ctl_power_on @param self @return ''' # start bluetoothctl, then input 'power on' exp = os.path.join(os.path.dirname(__file__), "files/power_on.exp") target_ip = self.target.ip status, output = shell_cmd_timeout('expect %s %s' % (exp, target_ip), timeout=200) if type(output) is bytes: output = output.decode("ascii") assert status == 2, "power on command fails: %s" % output def ctl_power_off(self): '''bluetoothctl power off bluetooth device @fn ctl_power_off @param self @return ''' # start bluetoothctl, then input 'power off' exp = os.path.join(os.path.dirname(__file__), "files/power_off.exp") target_ip = self.target.ip status, output = shell_cmd_timeout('expect %s %s' % (exp, 
target_ip), timeout=200) if type(output) is bytes: output = output.decode("ascii") assert status == 2, "power off command fails: %s" % output def ctl_visable_on(self): '''bluetoothctl enable visibility @fn ctl_visable_on @param self @return ''' # start bluetoothctl, then input 'discoverable on' exp = os.path.join(os.path.dirname(__file__), "files/discoverable_on.exp") target_ip = self.target.ip status, output = shell_cmd_timeout('expect %s %s' % (exp, target_ip), timeout=200) if type(output) is bytes: output = output.decode("ascii") assert status == 2, "discoverable on command fails: %s" % output def ctl_visable_off(self): '''bluetoothctl disable visibility @fn ctl_visable_off @param self @return ''' # start bluetoothctl, then input 'discoverable off' exp = os.path.join(os.path.dirname(__file__), "files/discoverable_off.exp") target_ip = self.target.ip status, output = shell_cmd_timeout('expect %s %s' % (exp, target_ip), timeout=200) if type(output) is bytes: output = output.decode("ascii") assert status == 2, "discoverable off command fails: %s" % output def insert_6lowpan_module(self): '''Insert BLE 6lowpan module @fn insert_6lowpan_module @param self @return ''' status, output = self.target.run('modprobe bluetooth_6lowpan') assert status == 0, "insert ble 6lowpan module fail: %s" % output # check lsmod, to see if the module is in status, output = self.target.run('lsmod') if "bluetooth_6lowpan" in output: pass else: self.target_collect_info('lsmod') assert False, "BLE 6lowpan module insert fails. %s" % self.log def enable_6lowpan_ble(self): '''Enable 6lowpan over BLE @fn enable_6lowpan_ble @param self @return ''' self.insert_6lowpan_module() status, output = self.target.run('echo 1 > /sys/kernel/debug/bluetooth/6lowpan_enable') assert status == 0, "Enable ble 6lowpan fail: %s" % output # check file number, it should be 1 status, output = self.target.run('cat /sys/kernel/debug/bluetooth/6lowpan_enable') if output == "1": pass else: self.target_collect_info('lsmod') assert False, "BLE 6lowpan interface is: %s\n%s" % (output, self.log) def disable_6lowpan_ble(self): '''Disable 6lowpan over BLE @fn disable_6lowpan_ble @param self @return ''' status, output = self.target.run('echo 0 > /sys/kernel/debug/bluetooth/6lowpan_enable') assert status == 0, "Disable ble 6lowpan fail: %s" % output # check file number, it should be 1 status, output = self.target.run('ifconfig') if "bt0" in output: self.target_collect_info('ifconfig') assert False, "Disable BLE 6lowpan fails: %s\n%s" % (output, self.log) else: pass def bt0_ping6_check(self, ipv6): ''' On main target, run ping6 to ping second's ipv6 address @fn bt0_ping6_check @param self @param ipv6: second target ipv6 address @return ''' cmd='ping6 -I bt0 -c 5 %s' % ipv6 (status, output) = self.target.run(cmd) assert status == 0, "Ping second target lowpan0 ipv6 address fail: %s" % output def bt0_ssh_check(self, ipv6): ''' On main target, ssh to second @fn bt0_ssh_check @param self @param ipv6: second target ipv6 address @return ''' # ssh root@<ipv6 address>%bt0 ssh_key = os.path.join(os.path.dirname(__file__), "files/refkit_qa_rsa") self.target.copy_to(ssh_key, "/tmp/") self.target.run("chmod 400 /tmp/refkit_qa_rsa") exp = os.path.join(os.path.dirname(__file__), "files/target_ssh.exp") exp_cmd = 'expect %s %s %s' % (exp, self.target.ip, ipv6) (status, output) = shell_cmd_timeout(exp_cmd) if type(output) is bytes: output = output.decode("ascii") assert status == 2, "Error messages: %s" % output def connect_6lowpan_ble(self, second): '''Build 6lowpan 
connection between taregts[0] and targets[1] over BLE @fn connect_6lowpan_ble @param self @param second: second target @return ''' self.enable_6lowpan_ble() second.enable_6lowpan_ble() success = 1 for i in range(3): # Second target does advertising second_mac = second.get_bt_mac() (status, output) = second.target.run('hciconfig hci0 leadv') time.sleep(1) # Self connects to second (status, output) = self.target.run('echo "connect %s 1" > /sys/kernel/debug/bluetooth/6lowpan_control' % second_mac) time.sleep(10) self.target_collect_info('hcitool con') assert status == 0, "BLE 6lowpan connection fails: %s\n%s" % (output, self.log) (status, output) = self.target.run('ifconfig') if 'bt0' in output: success = 0 break else: second.target.run('hciconfig hci0 reset') time.sleep(3) assert success == 0, "No bt0 generated: %s\n%s" % (output, self.log) def gatt_basic_check(self, btmac, point): '''Do basic gatt tool check points. @fn gatt_basic_check @param self @param btmac: remote advertising device BT MAC address @param point: a string for basic checking points. @return ''' # Local does gatttool commands if point == "connect": exp = os.path.join(os.path.dirname(__file__), "files/gatt_connect.exp") cmd = "expect %s %s %s" % (exp, self.target.ip, btmac) return shell_cmd_timeout(cmd, timeout=100) if point == "primary": cmd = "/tmp/gatttool -b %s --%s | grep '^attr handle'" % (btmac, point) elif point == "characteristics": cmd = "/tmp/gatttool -b %s --%s | grep '^handle'" % (btmac, point) elif point == "handle": cmd = "/tmp/gatttool -b %s --char-read -a 0x0002 | grep '02 03 00 00 2a'" % btmac else: assert False, "Wrong check point name, please check case" return self.target.run(cmd, timeout=20) ## # @} # @} ##
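
# A short usage sketch, assuming the oeqa runtime-test conventions used by
# this layer (a test class whose self.target is the device under test and
# which provides unittest-style assertions); the method name is illustrative.
def _example_bt_smoke_test(self):
    bt = BTFunction(self.target)
    bt.target_hciconfig_init()
    bt.enable_bluetooth()
    mac = bt.get_bt_mac()
    self.assertEqual(len(mac.split(':')), 6, 'Unexpected BD address: %s' % mac)
    bt.disable_bluetooth()
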
mit
-8,955,407,898,348,415,000
35.276873
128
0.56227
false
AutorestCI/azure-sdk-for-python
azure-mgmt-monitor/azure/mgmt/monitor/operations/action_groups_operations.py
1
19086
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from .. import models class ActionGroupsOperations(object): """ActionGroupsOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An objec model deserializer. :ivar api_version: Client Api Version. Constant value: "2017-04-01". """ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2017-04-01" self.config = config def create_or_update( self, resource_group_name, action_group_name, action_group, custom_headers=None, raw=False, **operation_config): """Create a new action group or update an existing one. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param action_group_name: The name of the action group. :type action_group_name: str :param action_group: The action group to create or use for the update. :type action_group: ~azure.mgmt.monitor.models.ActionGroupResource :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: ActionGroupResource or ClientRawResponse if raw=true :rtype: ~azure.mgmt.monitor.models.ActionGroupResource or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(action_group, 'ActionGroupResource') # Construct and send request request = self._client.put(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200, 201]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ActionGroupResource', response) if response.status_code == 201: deserialized = self._deserialize('ActionGroupResource', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def get( self, resource_group_name, action_group_name, custom_headers=None, raw=False, **operation_config): """Get an action group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param action_group_name: The name of the action group. :type action_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: ActionGroupResource or ClientRawResponse if raw=true :rtype: ~azure.mgmt.monitor.models.ActionGroupResource or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ActionGroupResource', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized def delete( self, resource_group_name, action_group_name, custom_headers=None, raw=False, **operation_config): """Delete an action group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param action_group_name: The name of the action group. :type action_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>` """ # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.delete(url, query_parameters) response = self._client.send(request, header_parameters, **operation_config) if response.status_code not in [200, 204]: raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response def list_by_subscription_id( self, custom_headers=None, raw=False, **operation_config): """Get a list of all action groups in a subscription. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of ActionGroupResource :rtype: ~azure.mgmt.monitor.models.ActionGroupResourcePaged[~azure.mgmt.monitor.models.ActionGroupResource] :raises: :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/providers/microsoft.insights/actionGroups' path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response deserialized = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized def list_by_resource_group( self, resource_group_name, custom_headers=None, raw=False, **operation_config): """Get a list of all action groups in a resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of ActionGroupResource :rtype: ~azure.mgmt.monitor.models.ActionGroupResourcePaged[~azure.mgmt.monitor.models.ActionGroupResource] :raises: :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response deserialized = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.ActionGroupResourcePaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized def enable_receiver( self, resource_group_name, action_group_name, receiver_name, custom_headers=None, raw=False, **operation_config): """Enable a receiver in an action group. This changes the receiver's status from Disabled to Enabled. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param action_group_name: The name of the action group. :type action_group_name: str :param receiver_name: The name of the receiver to resubscribe. :type receiver_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: None or ClientRawResponse if raw=true :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.mgmt.monitor.models.ErrorResponseException>` """ enable_request = models.EnableRequest(receiver_name=receiver_name) # Construct URL url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}/subscribe' path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'), 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct body body_content = self._serialize.body(enable_request, 'EnableRequest') # Construct and send request request = self._client.post(url, query_parameters) response = self._client.send( request, header_parameters, body_content, **operation_config) if response.status_code not in [200, 409]: raise models.ErrorResponseException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response
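
# A brief usage sketch for the operations class above, assuming the usual
# azure-mgmt-monitor entry point of this SDK generation
# (azure.mgmt.monitor.MonitorManagementClient) and pre-built Azure AD
# credentials; subscription, resource group and action group names are
# placeholders.
def _action_groups_example(credentials, subscription_id):
    from azure.mgmt.monitor import MonitorManagementClient

    client = MonitorManagementClient(credentials, subscription_id)
    for group in client.action_groups.list_by_resource_group('my-resource-group'):
        print(group.name)
    return client.action_groups.get('my-resource-group', 'my-action-group')
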
mit
-7,601,957,632,301,776,000
46.125926
152
0.648643
false
molly/women-social-reformers-on-wikipedia
gather.py
1
2451
# Copyright (c) 2015–2016 Molly White # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software # and associated documentation files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING # BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import requests def load_list(filename): """Load the list of women from file.""" with open(filename, "r", encoding="utf-8") as f: lines = f.readlines() return format_names([line.strip() for line in lines]) def format_names(women): """Format the names for searching.""" formatted = [] for name in women: split = name.split(",") formatted.append(" ".join([split[1].strip(), split[0].strip()]) if len(split) == 2 else split[0].strip()) return formatted def search(women): """Do the search on the list of women.""" for woman in women: find_page(woman) def find_page(search_term): """Attempt to find a matching Wikipedia article for the given woman.""" api_params = {"action": "opensearch", "search": search_term, "limit": 1, "namespace": 0, "format": "json"} r = requests.get(api_url, params=api_params, headers=headers) if r: print(r.json()) else: print(None) def main(): women = load_list("women.txt") search(women) if __name__ == "__main__": headers = {'user-agent': "women-social-reformers-on-wikipedia: https://github.com/molly/women-social-reformers-" "on-wikipedia"} api_url = "https://en.wikipedia.org/w/api.php" main()
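
# A small extension sketch of find_page above. The MediaWiki opensearch action
# is generally documented to return [query, [titles], [descriptions], [urls]],
# so the matching article URL can be pulled from the JSON as below; that
# return-format handling is an assumption, not existing project code.
def find_page_url(search_term):
    api_params = {"action": "opensearch", "search": search_term, "limit": 1,
                  "namespace": 0, "format": "json"}
    r = requests.get(api_url, params=api_params, headers=headers)
    data = r.json()
    if len(data) > 3 and data[3]:
        return data[3][0]  # URL of the best match, if any
    return None
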
mit
6,424,813,447,681,501,000
36.692308
116
0.665578
false
plin1112/pysimm
pysimm/cassandra.py
1
67253
# ****************************************************************************** # pysimm.cassandra module # ****************************************************************************** # # ****************************************************************************** # License # ****************************************************************************** # The MIT License (MIT) # # Copyright (c) 2017 Alexander Demidov, Michael E. Fortunato, Coray M. Colina # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from StringIO import StringIO from subprocess import call, Popen, PIPE import os import re import numpy as np import random import logging import types from collections import Iterable, OrderedDict from pysimm import system from string import ascii_uppercase from pydoc import locate DATA_PATH = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../dat/csndra_data')) KCALMOL_2_K = 503.22271716452 CASSANDRA_EXEC = os.environ.get('CASSANDRA_EXEC') # Creating a logger instance and send its output to console 'deafault' logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S', format='%(asctime)s [%(levelname)s]: %(message)s') DEFAULT_PARAMS = { 'Temperature_Info': 300, 'Pressure_Info': 1, 'Rcutoff_Low': 0.1 } class MCSimulation(object): """pysimm.cassandra.MCSimulation Object containing the settings and the logic necessary to partially set-up an abstract Monte Carlo simulation to be submitted to the CASSANDRA software. The object also will include the simulation results once the simulations are finished. Attributes: mc_sst (:class:`~pysimm.cassandra.McSystem`) : describes all molecules to be inserted by CASSANDRA init_sst (:class:`~pysimm.system.System`) : describes the optional initial fixed molecular configuration for MC simulations (default: empty cubic box with 1 nm side length). If the particles in the system are not attributed with the flag `is_fixed` all of them are considered to be fixed, and will be marked with this flag, otherwise all particles with is_fixed=False will be removed. Keyword Args: out_folder (str) : the relative path of the simulation results (all .dat, .mcf, as well as .chk, ... files will go there). If the folder does not exist it will be created with 0755 permissions. props_file (str) : the name of the .inp file. Note: Other keyword arguments that are accepted are the GCMC simulation settings. The keywords of the settings are the same as they are described in CASSANDRA specification but without # symbol. 
**For example**: the keyword argument `Run_Name='my_simulation'` will set `#Run_Name` setting in CASSANDRA input file to `my_simulation` value Parameters: props (dictionary) : include all simulation settings to be written to the CASSANDRA .inp file input (str) : text stream that will be written to the CASSANDRA .inp file tot_sst (:class:`~pysimm.system.System`) : object containing the results of CASSANDRA simulations """ def __init__(self, mc_sst=None, init_sst=None, **kwargs): global DATA_PATH # Initializing CASSANDRA input stream, empty at the beginning self.input = '' # Initializing dictionary that contains records that directly will be sent to the .inp file self.props = OrderedDict() self.logger = logging.getLogger('MC Simulation') # Reading default properties of the GCMC simulations def_dat = Cassandra(system.System()).read_input(os.path.join(DATA_PATH, 'mc_default.inp')) tmp = kwargs.get('out_folder') # Folder for the results and temporary files if tmp: self.out_folder = tmp if os.path.isabs(tmp): self.out_folder = os.path.relpath(tmp) else: self.out_folder = os.getcwd() if not os.path.exists(self.out_folder): os.makedirs(self.out_folder, mode=0755) prefix = kwargs.get('Run_Name', def_dat['Run_Name']) self.props['Run_Name'] = InpSpec('Run_Name', os.path.join(self.out_folder, prefix), '') self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', '')) # Simple (one-value) dynamic properties self.props['Temperature_Info'] = InpSpec('Temperature_Info', kwargs.get('Temperature_Info'), DEFAULT_PARAMS['Temperature_Info']) self.props['Pair_Energy'] = InpSpec('Pair_Energy', kwargs.get('Pair_Energy'), def_dat['Pair_Energy']) self.props['Rcutoff_Low'] = InpSpec('Rcutoff_Low', kwargs.get('Rcutoff_Low'), def_dat['Rcutoff_Low']) self.props['Mixing_Rule'] = InpSpec('Mixing_Rule', kwargs.get('Mixing_Rule'), def_dat['Mixing_Rule']) self.props['Seed_Info'] = InpSpec('Seed_Info', kwargs.get('Seed_Info'), [random.randint(int(1e+7), int(1e+8 - 1)), random.randint(int(1e+7), int(1e+8 - 1))]) # Multiple-value one/many line dynamic properties self.props['Run_Type'] = InpSpec('Run_Type', kwargs.get('Run_Type'), def_dat['Run_Type']) self.props['Charge_Style'] = InpSpec('Charge_Style', kwargs.get('Charge_Style'), def_dat['Charge_Style']) self.props['VDW_Style'] = InpSpec('VDW_Style', kwargs.get('VDW_Style'), def_dat['VDW_Style']) self.props['Simulation_Length_Info'] = InpSpec('Simulation_Length_Info', kwargs.get('Simulation_Length_Info'), def_dat['Simulation_Length_Info'], **{'write_headers': True, 'new_line': True}) self.props['CBMC_Info'] = InpSpec('CBMC_Info', kwargs.get('CBMC_Info'), def_dat['CBMC_Info'], **{'write_headers': True, 'new_line': True}) self.props['Box_Info'] = InpSpec('Box_Info', kwargs.get('Box_Info'), def_dat['Box_Info'], **{'new_line': True}) self.props['Property_Info 1'] = InpSpec('Property_Info 1', kwargs.get('Property_Info'), None, **{'new_line': True}) # Setting the simulation total system if init_sst: self.tot_sst = init_sst.copy() self.tot_sst.center('box', [0, 0, 0], True) # the center of the calculation box should be at origin else: self.logger.warning('The frame generating system for Monte-Carlo simulations is not set. ' 'Creating empty cubic box of 1 nm size') self.tot_sst = system.System() self.tot_sst.forcefield = 'trappe/amber' self.tot_sst.dim = system.Dimension(dx=10, dy=10, dz=10) # Molecule configuration files describing all species of the system. 
# They are **absolutely** needed to start calculation mol_files = OrderedDict() # Some necessary verification of obtained system # TODO: check the forcefield to be sure that it is claas 1 if False: self.logger.error('CASSANDRA supports only 1-st class force fields') exit(1) self.tot_sst.zero_charge() # the sum of the charges should necessary be 0 # Creating the system of fixed molecules self.fxd_sst_mcfile = None self.fxd_sst = kwargs.get('fixed_sst') if self.tot_sst.particles: tmp = self.tot_sst.copy() for p in tmp.particles: if not p.is_fixed: tmp.particles.remove(p.tag) tmp.remove_spare_bonding() self.fxd_sst = tmp self.fxd_sst_mcfile = os.path.join(self.out_folder, 'fixed_syst.mcf') mol_files['file1'] = [self.fxd_sst_mcfile, 1] # Setting up the Monte Carlo system self.mc_sst = mc_sst if mc_sst: mc_sst.file_store = self.out_folder mol_files = mc_sst.update_props(mol_files) if kwargs.get('Molecule_Files'): mol_files = OrderedDict(sorted(kwargs.get('Molecule_Files').items())) # Raising an error and stop execution if no MCF information in one or another way is provided if (mc_sst is None) and (not kwargs.get('Molecule_Files')): self.logger.error('The molecular configuration files of gas molecules for simulation are not set. ' 'Nothing to simulate. Exiting...') exit(0) self._n_spec = len(mol_files) self.props['Nbr_Species'] = InpSpec('Nbr_Species', self._n_spec, self._n_spec) self.props['Molecule_Files'] = InpSpec('Molecule_Files', mol_files, None, **{'new_line': True}) # Synchronzing "start type" .inp record self.fxd_sst_xyz = '' pops_list = [0] * self._n_spec start_type = 'make_config' if self.fxd_sst: pops_list[0] = 1 self.fxd_sst_xyz = os.path.join(self.out_folder, 'fixed_syst.xyz') start_type = 'read_config' start_conf_dict = OrderedDict([('start_type', start_type), ('species', pops_list), ('file_name', self.fxd_sst_xyz)]) self.props['Start_Type'] = InpSpec('Start_Type', kwargs.get('Start_Type'), start_conf_dict) # Synchronzing Fragment files: frag_files = OrderedDict() if mc_sst: mc_sst.temperature = self.props['Temperature_Info'].value frag_files = mc_sst.update_frag_record(frag_files) if kwargs.get('Fragment_Files'): frag_files = OrderedDict(sorted(kwargs.get('Fragment_Files').items())) if (mc_sst is None) and (not kwargs.get('Fragment_Files')): self.logger.error('Cannot set the fragment files of gas molecules for simulation') exit(1) self.props['Fragment_Files'] = InpSpec('Fragment_Files', frag_files, None, **{'new_line': True}) def write(self): """pysimm.cassandra.MCSimulation.write Iterates through the :class:`~MCSimulation.props` dictionary creating the text for correct CASSANDRA input """ for key in self.props.keys(): if self.props[key].value is not None: self.input += '{:}\n'.format(self.props[key].to_string()) self.input += '\nEND' # Initializing output stream self.logger.info('Writing CASSANDRA .inp file to "{:}"...'.format(self.props_file)) out_stream = open(self.props_file, 'w') out_stream.write('{:}'.format(self.input)) out_stream.close() self.logger.info('File: "{:}" was created sucsessfully'.format(self.props_file)) def group_by_id(self, group_key='matrix'): """pysimm.cassandra.MCSimulation.group_by_id Method groups the atoms of the system :class:`~MCSimulation.tot_sst` by a certain property. Will iterate through all atoms in the system and return indexes of only those atoms that match the property. Currently supports 3 properties defined by the input keyword argument argument. Keyword Args: group_key (str): text constant defines the property to match. 
Possible keywords are: (1) `matrix` -- (default) indexes of the atoms in :obj:`~MCSimulation.fxd_sst` (2) `rigid` -- indexes of all atoms that have rigid atomic bonds. It is assumed here that rigid and nonrigid atoms can interact only through intermolecular forces (3) `nonrigid` -- opposite of previous, indexes of all atoms that have nonrigid atomic bonds Returns: str: string in format `a1:b1 a2:b2 ...` where all indexes inside `[ak, bk]` belongs to the selected group and array of the form `[[a1, b1], [a2, b2], ...]` """ fxd_sst_idxs = [] if self.fxd_sst: fxd_sst_idxs = range(1, len(self.fxd_sst.particles) + 1) # Behaviour depending on type of particles to check check = lambda x: x if group_key.lower() == 'nonrigid': check = lambda x: not x.is_rigid elif group_key.lower() == 'rigid': check = lambda x: x.is_rigid elif group_key.lower() == 'matrix': check = lambda x: x.tag in fxd_sst_idxs idx_array = [[-1, -1]] for p in self.tot_sst.particles: if check(p): if idx_array[-1][0] > 0: if abs(p.tag - idx_array[-1][1]) > 1: idx_array.append([p.tag, p.tag]) else: idx_array[-1][1] = p.tag else: idx_array[-1] = [p.tag, p.tag] idx_string = '' for t in idx_array: if t[1] - t[0] > 1: idx_string += str(t[0]) + ':' + str(t[1]) + ' ' return idx_string, idx_array def upd_simulation(self): """pysimm.cassandra.MCSimulation.upd_simulation Updates the :class:`~MCSimulation.tot_sst` field using the `MCSimulation.props['Run_Name'].chk` file. Will try to parse the checkpoint file and read the coordinates of the molecules inserted by CASSANDRA. If neither of the molecules from the :class:`~MCSimulation.mc_sst` can be fit to the text that was read the method will raise an exception. The fitting method: :class:`~McSystem.make_system` assumes that different molecules inserted by CASSANDRA have the same order of the atoms. """ fname = '{:}{:}'.format(self.props['Run_Name'].value, '.chk') self.logger.info('Updating MC system from the CASSANDRA {:} file...'.format(fname)) if os.path.isfile(fname): try: with open(fname, 'r') as inp: lines = inp.read() # Define the starting index of the lines with inserted atoms start_ind = lines.find('total number of molecules') end_ind = start_ind + lines[start_ind:-1].find('****', 1) count_info = lines[start_ind:end_ind].split('\n') offset = 1 if self.fxd_sst: tmp = count_info[1].split() offset += int(tmp[1]) * len(self.fxd_sst.particles) # Grab the lines with inserted atoms start_ind = lines.find('coordinates for all the boxes') all_coord_lines = lines[start_ind:-1].split('\n') inp.close() gas_lines = all_coord_lines[offset:] if len(gas_lines) > 0: if self.fxd_sst: self.tot_sst = self.fxd_sst.copy() self.tot_sst.add(self.mc_sst.make_system(gas_lines), change_dim=False) self.logger.info('Simulation system successfully updated') else: self.logger.info('Final MC configuration has 0 new particles the initial system remains the same') except IndexError: self.logger.error('Cannot fit the molecules from the CASSANDRA file to the PySIMM system') else: self.logger.error('Cannot find the CASSANDRA checkpoint file to update simulation. 
' 'Probably it cannot be written by CASSANDRA to the place you specified') def __check_params__(self): """pysimm.cassandra.MCSimulation.__check_params__ Private method designed for update the fields of the simulation object to make them conformed with each other """ # Sync the simulation box parameters dx, dy, dz = self.tot_sst.dim.size() if (dx == dy) and (dy == dz): box_type = 'cubic' box_dims = str(dx) else: box_type = 'orthogonal' box_dims = '{0:} {1:} {2:}'.format(dx, dy, dz) upd_vals = OrderedDict([('box_count', 1), ('box_type', box_type), ('box_size', box_dims)]) if ('Box_Info' in self.props.keys()) and isinstance(self.props['Box_Info'], InpSpec): self.props['Box_Info'] = InpSpec('Box_Info', upd_vals, None, **{'new_line': True}) else: self.props['Box_Info'] = upd_vals tmp = self.props['Box_Info'].value['box_size'].split() if self.props['Box_Info'].value['box_type'] == 'cubic': tmp = tmp + tmp + tmp self.tot_sst.dim = system.Dimension(dx=float(tmp[0]), dy=float(tmp[1]), dz=float(tmp[2])) # Sync of the volume change frequency in equilibration regime if 'Prob_Volume' in self.props.keys(): if self.props['Prob_Volume'] is None: self.props['Run_Type'].value['steps'] = self.props['Run_Type'].value['steps'][0] def __write_chk__(self, out_file): """pysimm.cassandra.MCSimulation.__write_chk__ Creates the CASSANDRA checkpoint file basing on the information from the `~MCSimulation.tot_sst` field """ # Initializing output stream if out_file == 'string': out_stream = StringIO() else: out_stream = open(out_file, 'w+') blk_separ = ' {:*^75}\n' # Writing Translation/rotation/... info out_stream.write(blk_separ.format('Translation,rotation, dihedral, angle distortion')) tmplate = '{t[0]$$}{t[1]$$}{t[2]$$}{t[3]$$}{t[4]$$}\n' molecules = self.props['Molecule_Files'].value for m, i in zip(molecules, range(len(molecules))): out_stream.write(tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0])) out_stream.write(tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0])) out_stream.write('{t[0]:>23.14E}{t[2]:>23.14E}{t[2]:>23.14E}\n'.format(t=[0, 0, 0])) out_stream.write('{0:>12d}{0:>12d}\n'.format(0, 0)) # Small section with total # of MC trials -- it is 0 at the beginning out_stream.write(blk_separ.format('# of MC steps')) out_stream.write('{:>12d}\n'.format(0)) # Writing Box-info information out_stream.write(blk_separ.format('Box info')) tmp = self.props['Box_Info'].value['box_size'] x, y, z = 0, 0, 0 bx_type = None if isinstance(tmp, types.ListType): if len(tmp) > 3: x, y, z = tmp[0], tmp[1], tmp[2] elif isinstance(tmp, int) or isinstance(tmp, float): x, y, z = tmp, tmp, tmp else: exit(0) # First 0 here correspond to the # of trials out_stream.write('{0:>12d}\n{1:<18.10f}\n{2:}\n'.format(0, x * y * z, self.props['Box_Info'].value['box_type'])) tmpl = '{t[0]&&}{t[1]&&}{t[2]&&}\n' tmp = np.diag([x, y, z]) for lines in tmp: out_stream.write((tmpl.replace('&&', ':^22.14f')).format(t=lines)) tmp = np.diag([1 / x, 1 / y, 1 / z]) for lines in tmp: out_stream.write((tmpl.replace('&&', ':^22.8f')).format(t=lines)) out_stream.write('{:>18.12f}\n'.format(0)) # Creating seeds out_stream.write(blk_separ.format('SEEDS')) out_stream.write('{t[0]:>12d}{t[1]:>12d}{t[2]:>12d}\n{t[3]:>12d}{t[4]:>12d}\n'.format( t=np.random.random_integers(int(1e+7), int(1e+8 - 1), 5))) # Writing total number of molecules by species out_stream.write(blk_separ.format('Info for total number of molecules')) out_stream.write('{0:>11d}{1:>11d}\n'.format(1, 1)) # Currentely only one polymer "molecule" in the simulation for i in range(1, 
len(molecules)): out_stream.write('{0:>11d}{1:>11d}\n'.format(i + 1, 0)) out_stream.write(blk_separ.format('Writing coordinates of all boxes')) # Writing coordinates of atoms in all boxes line_template = '{l[0]:<5}{l[1]:<25.15f}{l[2]:<25.15f}{l[3]:<25.15f}{l[4]:>10d}\n' for parts in self.tot_sst.particles: try: out_stream.write(line_template.format(l=[parts.type.name, parts.x, parts.y, parts.z, 1])) except: continue out_stream.close() class GCMC(MCSimulation): """pysimm.cassandra.GCMC Initiates the specific type of Monte Carlo simulations for CASSANDRA: simulations using Grand-Canonical ensemble of particles (constant volume-temperature-chemical potential, muVT). See :class:`~pysimm.cassandra.MCSimulation` for the detailed description of the properties. """ def __init__(self, mc_sst=None, init_sst=None, **kwargs): MCSimulation.__init__(self, mc_sst, init_sst, **kwargs) self.logger.name = 'GCMC' self.props['Sim_Type'] = InpSpec('Sim_Type', 'GCMC', 'gcmc') # Path for all intermediate Cassandra files and results self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', 'gcmc_input.inp')) add = 0 if self.fxd_sst and self.fxd_sst.particles.count: add = 1 self.props['Chemical_Potential_Info'] = InpSpec('Chemical_Potential_Info', kwargs.get('chem_pot'), -30 * (self._n_spec - add)) # Order of the next four items is IMPORTANT! Check the CASSANDRA spec file for further info def_init_prob = 0.25 limits = [0.3] * self._n_spec if self.fxd_sst: limits[0] = 0 self.props['Prob_Translation'] = InpProbSpec('Prob_Translation', kwargs.get('Prob_Translation'), OrderedDict([('tot_prob', def_init_prob), ('limit_vals', limits)]), **{'new_line': True, 'indicator': 'start'}) tps = ['cbmc'] * self._n_spec if self.fxd_sst: tps[0] = 'none' self.props['Prob_Insertion'] = InpProbSpec('Prob_Insertion', kwargs.get('Prob_Insertion'), OrderedDict([('tot_prob', def_init_prob), ('types', tps)]), **{'new_line': True}) self.props['Prob_Deletion'] = InpProbSpec('Prob_Deletion', kwargs.get('Prob_Deletion'), def_init_prob) max_ang = [180] * self._n_spec if self.fxd_sst: max_ang[0] = 0 self.props['Prob_Rotation'] = InpProbSpec('Prob_Rotation', kwargs.get('Prob_Rotation'), OrderedDict([('tot_prob', def_init_prob), ('limit_vals', max_ang)]), **{'new_line': True, 'indicator': 'end'}) class NVT(MCSimulation): """pysimm.cassandra.NVT Initiates the specific type of Monte Carlo simulations for CASSANDRA: simulations using Canonical ensemble of particles (constant volume-temperature-number of particles, NVT). See :class:`~pysimm.cassandra.MCSimulation` for the detailed description of the properties. 
""" def __init__(self, mc_sst=None, init_sst=None, **kwargs): MCSimulation.__init__(self, mc_sst, init_sst, **kwargs) self.logger.name = 'NVT' self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', 'nvt-mc_input.inp')) self.props['Sim_Type'] = InpSpec('Sim_Type', 'nvt_mc', 'nvt_mc') move_probs = [1, 1, 1] limits = [0.3] * self._n_spec if self.fxd_sst: limits[0] = 0 self.props['Prob_Translation'] = InpProbSpec('Prob_Translation', kwargs.get('Prob_Translation'), OrderedDict([('tot_prob', move_probs[0]), ('limit_vals', limits)]), **{'new_line': True, 'indicator': 'start'}) sub_probs = [1] * self._n_spec if self.fxd_sst: sub_probs[0] = 0 sm = sum(sub_probs) sub_probs = [s / sm for s in sub_probs] self.props['Prob_Regrowth'] = InpProbSpec('Prob_Regrowth', kwargs.get('Prob_Regrowth'), OrderedDict([('tot_prob', move_probs[1]), ('sub_probs', sub_probs)]), **{'new_line': True}) max_ang = [180] * self._n_spec if self.fxd_sst: max_ang[0] = 0 self.props['Prob_Rotation'] = InpProbSpec('Prob_Rotation', kwargs.get('Prob_Rotation'), OrderedDict([('tot_prob', move_probs[2]), ('limit_vals', max_ang)]), **{'new_line': True, 'indicator': 'end'}) class NPT(MCSimulation): """pysimm.cassandra.NPT Initiates the specific type of Monte Carlo simulations for CASSANDRA: simulations using Isobaric-Isothermal ensemble of particles (NPT). See :class:`~pysimm.cassandra.MCSimulation` for the detailed description of the properties. """ def __init__(self, mc_sst=None, init_sst=None, **kwargs): MCSimulation.__init__(self, mc_sst, init_sst, **kwargs) # Initialising object attributes self.logger.name = 'NPT' self.props_file = os.path.join(self.out_folder, kwargs.get('props_file', 'npt-mc_input.inp')) # Initialising simulation-specific props attribute self.props['Sim_Type'] = InpSpec('Sim_Type', 'npt_mc', 'npt_mc') self.props['Pressure_Info'] = InpSpec('Pressure_Info', kwargs.get('Pressure_Info'), DEFAULT_PARAMS['Pressure_Info']) move_probs = [.34, .02, .32, .32] limits = [0.3] * self._n_spec if self.fxd_sst: limits[0] = 0 self.props['Prob_Translation'] = InpProbSpec('Prob_Translation', kwargs.get('Prob_Translation'), OrderedDict([('tot_prob', move_probs[0]), ('limit_vals', limits)]), **{'new_line': True, 'indicator': 'start'}) vol_margins = 0.1 * self.props['Box_Info'].value['box_size'] self.props['Prob_Volume'] = InpProbSpec('Prob_Volume', kwargs.get('Prob_Volume'), OrderedDict([('tot_prob', move_probs[1]), ('types', vol_margins)]), **{'new_line': True}) sub_probs = [1] * self._n_spec if self.fxd_sst: sub_probs[0] = 0 sm = sum(sub_probs) sub_probs = [s / sm for s in sub_probs] self.props['Prob_Regrowth'] = InpProbSpec('Prob_Regrowth', kwargs.get('Prob_Regrowth'), OrderedDict([('tot_prob', move_probs[2]), ('sub_probs', sub_probs)]), **{'new_line': True}) max_ang = [180] * self._n_spec if self.fxd_sst: max_ang[0] = 0 self.props['Prob_Rotation'] = InpProbSpec('Prob_Rotation', kwargs.get('Prob_Rotation'), OrderedDict([('tot_prob', move_probs[3]), ('limit_vals', max_ang)]), **{'new_line': True, 'indicator': 'end'}) class InpSpec(object): """pysimm.cassandra.InpSpec Represents the most common object used for carrying one logical unit of the CASSANDRA simulation options Parameters: key (str) : the keyword of the simulation option (literally the string that goes after the # sign in CASSANDRA .inp file) value (object) : numerical or text values of the particular simulation option structured in a certain way. 
Here goes only the values that are wished to be changed (it might be just one field of a big dictionary) default (object) : the most complete default description of the simulation option Keyword Args: write_headers (boolean): if the :obj:`~value` is dictionary defines whether the dictionary keys should be written to the output new_line (boolean): if the :obj:`~value` is iterable defines whether each new element will be written to the new line """ def __init__(self, key, value, default, **kwargs): self.key = key self.write_headers = kwargs.get('write_headers') self.is_new_line = kwargs.get('new_line') self.value = value if value: if isinstance(default, types.DictType): # Add from default structure all properties that were not defined by user for ky in value.keys(): default[ky] = value[ky] self.value = default else: self.value = value elif value == []: self.value = [] else: # If nothing was passed write default self.value = default def to_string(self): """pysimm.cassandra.InpSpec.to_string Creates the proper text representation of the property stored in the :obj:`~value` field Returns: str: formatted text string """ if self.value is not None: result = '# {:}\n'.format(self.key) # Strings if isinstance(self.value, types.StringTypes): result += str(self.value) # Dictionaries elif isinstance(self.value, types.DictType): for ks in list(self.value.keys()): if self.write_headers: result += ks + ' ' tmp = self.value[ks] if (isinstance(tmp, Iterable)) & (not isinstance(tmp, types.StringTypes)): result += ' '.join(str(p) for p in tmp) else: result += str(tmp) if self.is_new_line: result += '\n' else: result += ' ' result = result[:-1] # Remove the very last new line character # Lists elif isinstance(self.value, Iterable): for elem in self.value: if isinstance(elem, Iterable): subresult = '' for subelem in elem: subresult = subresult + str(subelem) + ' ' else: subresult = str(elem) + ' ' result += subresult # Simple types else: result += str(self.value) result += '\n!{:^^20}\n'.format('') return result class InpProbSpec(InpSpec): """pysimm.cassandra.InpSpec Extension of the :class:`~InpSpec` class that takes into account special representation of the movement probabilities in the CASSANDRA input file. """ def __init__(self, key, value, default, **kwargs): super(InpProbSpec, self).__init__(key, value, default, **kwargs) def to_string(self): tmp = super(InpProbSpec, self).to_string() if self.key == 'Prob_Translation': tmp = '# Move_Probability_Info\n\n' + tmp elif self.key == 'Prob_Rotation': tmp += '\n# Done_Probability_Info\n' return tmp class McSystem(object): """pysimm.cassandra.McSystem Wrapper around the list of :class:`~pysimm.system.System` objects. Each element in the list represents single molecule of a different specie that will be used during MC simulations. Additionally, the object is responsible for creating .dat and .mcf files needed for the simulation and reading back the CASSANDRA simulation results. Attributes: sst (list of :class:`~pysimm.system.System`) : items representing single molecules of different species to be inserted by CASSANDRA. If the sst is a list (not a single value) it is assumed that all of the following properties are synchronized with it by indexes. chem_pot (list of int) : chemical potential for each specie [Joule/mol] Keyword Args: max_ins (list of int) : defines the highest possible number of molecules of corresponding specie. Basing on these values CASSANDRA allocates memory for simulations. (default: 5000). 
is_rigid (list of boolean): defines whether the atoms in the particular molecule should be marked as rigid or not. **Important!** In current implementation the module doesn't support flexible molecule angles, so the `is_rigid=False` is designed to be used exclusively for **single bead** molecules. Parameters: made_ins (list of int) : number of particles of each specie inserted by CASSANDRA. mcf_file (list of str) : defines full relative names of molecule configuration files **(.mcf)** required by CASSANDRA. Files will be created automatically. frag_file (list of str) : defines full relative names of possible relative configuration files **(.dat)** required by CASSANDRA. Files will be created automatically. """ def __init__(self, sst, **kwargs): self.logger = logging.getLogger('MC_SYSTEM') self.sst = make_iterable(sst) for sst in self.sst: # Checking that the force-field of the input system is of the class-1 as it is direct CASSANDRA restriction if isinstance(sst, system.System): sst.zero_charge() sst.add_particle_bonding() if sst.ff_class: if not (sst.ff_class == '1'): self.logger.error('Currently cassandra supports only with **Type-I** force fields. ' 'The PYSIMM systems you provided are of the different types' 'Exiting...') exit(1) else: self.logger.info('The Force-Field type of the system is not defined. ' 'Assuming it is **Type-1** force field') sst.ff_class = '1' if not all([pt.name for pt in sst.particle_types]): self.logger.error('The name of at least one particle type in MC system is not defined. ' 'Will not be able to map particles back after the CASSANDRA simulations. ' '\nPlease, setup the names for all particle types for your MC system') exit(1) # Decorating the system with bonds_fixed flag and angle_fixed flag for bt in sst.bond_types: bt.is_fixed = True for at in sst.angle_types: if at.k > 70: at.is_fixed = True self.file_store = os.getcwd() self.max_ins = make_iterable(kwargs.get('max_ins', 5000)) self.is_rigid = make_iterable(kwargs.get('is_rigid', [True] * len(self.sst))) self.made_ins = [0] * len(self.sst) self.mcf_file = [] self.frag_file = [] self.temperature = None def update_props(self, props): """pysimm.cassandra.McSystem.update_props For each specie in the system creates the .mcf file required for CASSANDRA simulation. Args: props (dictionary) : contains the .mcf file names and maximally allowed number of molecules insertions. The dictionary is to be assigned to 'Molecule_Files' property of the MC simulation Returns: props: updated input dictionary """ # Generate correct .mcf files al_ind = 0 for (sstm, count) in zip(self.sst, range(len(self.sst))): fullfile = os.path.join(self.file_store, '{:}{:}{:}'.format('particle', str(count + 1), '.mcf')) for p_type in sstm.particle_types: if p_type.elem and (not p_type.real_elem): p_type.real_elem = p_type.elem p_type.elem = ascii_uppercase[int(al_ind / 10)] + str(al_ind % 10) al_ind += 1 McfWriter(sstm, fullfile).write() self.mcf_file.append(fullfile) # Make the files list to be returned offset = len(props) for (mcf, ins, count) in zip(self.mcf_file, self.max_ins, range(1 + offset, len(self.mcf_file) + 1 + offset)): props['file' + str(count)] = [mcf, ins] return props def update_frag_record(self, frag_record): """pysimm.cassandra.McSystem.update_frag_record For each specie in the system creates the single configuration .dat file required for CASSANDRA simulation. Args: frag_record: dictionary containing the .dat file names and their ids. 
The dictionary is to be assigned to 'Molecule_Files' property of the MC simulation Returns: dictionary: updated dictionary """ # Generating the structure files if self.temperature is None: self.temperature = 300 for (sstm, count) in zip(self.sst, range(len(self.sst))): fullfile = os.path.join(self.file_store, '{:}{:}{:}'.format('particle', str(count + 1), '.dat')) with open(fullfile, 'w') as out: frag_count = 1 out.write('{:>12d}\n'.format(frag_count)) out.write('{:>21f}{:>21f}\n'.format(self.temperature, 0)) tmplte = '{:<10}{:<24f}{:<24f}{:<24f}\n' for prt in sstm.particles: out.write(tmplte.format(prt.type.elem, prt.x, prt.y, prt.z)) self.frag_file.append(fullfile) # Generating the files list for (frags, count) in zip(self.frag_file, range(1, len(self.frag_file) + 1)): frag_record['file' + str(count)] = [frags, count] return frag_record def make_system(self, text_output): """pysimm.cassandra.McSystem.make_system Parses the checkpoint (.chk) file made by CASSANDRA and creates new molecules basing on the new coordinates information. Assumes that all atoms of a certain molecule are listed in .chk file together (molecule identifiers are not mixed). Note: The logic of comparison of the xyz-like text record from the .chk file with the :class:`~pysimm.system.System` object is most straightforward: It is the consecutive comparison of particle names and first letters (before the white space) in the text record. In this implementation order matters! For example, for CO2, if in the system atoms are ordered as C-O-O and in the text they are ordered as O-C-O fit will fail. Args: text_output (str): text stream from the CASSANDRA .chk file containing the coordinates of newly inserted molecules Returns: :class:`~pysimm.system.System` : object containing all newly inserted molecules """ tmp_sst = None count = 0 # counter of the lines in the input file sys_idx = 0 # counter of the gas molecules to lookup while count < len(text_output): tmp = self.sst[sys_idx].copy() dictn = text_output[count:(len(tmp.particles) + count)] if self.__fit_atoms__(tmp, dictn): for p in tmp.particles: vals = dictn[p.tag - 1].split() # Read the coordinates from the text output of the CASSANDRA simulation p.x, p.y, p.z = map(float, vals[1:4]) # Force velocities of the particles to be 0 p.vx, p.vy, p.vz = 0.0, 0.0, 0.0 p.molecule.syst_tag = 0 if self.is_rigid[sys_idx]: for p in tmp.particles: p.is_rigid = True if tmp_sst: tmp_sst.add(tmp) else: tmp_sst = tmp.copy() self.made_ins[sys_idx] += 1 count += len(tmp.particles) sys_idx = 0 else: sys_idx += 1 if sys_idx >= len(self.sst): self.logger.error('Wasn\'t able to read CASSANDRA .chk file. ' 'Please check either MC-simulation provided to PySIMM or the CASSANDRA ' 'checkpoint file ') exit(1) if tmp_sst: tmp_sst.update_tags() tmp_sst.objectify() return tmp_sst def __fit_atoms__(self, molec, text_lines): """pysimm.cassandra.McSystem.__fit_atoms__ Implements simple logic of comparison of the xyz-like text record with the :class:`~pysimm.system.System` object. The comparison is based on the consecutive comparison of particle names and first letters (before the white space) in the text. In this implementation order matters! E.g. for CO2, if in the system atoms are ordered as C-O-O and in the text they are ordered like O-C-O fit will return False. 
Returns: boolean: flag whether the text record fit the molecule or not """ flag = True # Cannot map anything if number of molecules is different from number of data lines if len(molec.particles) != len(text_lines): return False # Check the sequence of element names they for p in molec.particles: vals = text_lines[p.tag - 1].split() if vals[0] != p.type.elem: return False return flag class Cassandra(object): """pysimm.cassandra.Cassandra Organizational object for running CASSANDRA simulation tasks. In current implementation it is able to run Canonical, Grand Canonical, and Isothermal-Isobaric Monte Carlo simulations (:class:`~GCMC`, :class:`~NVT`, and :class:`~NPT`, correspondingly). Parameters: system (:class:`~pysimm.system.System`) : molecular updated during the simulations run_queue (list) : the list of scheduled tasks """ def __init__(self, init_sst): self.logger = logging.getLogger('CSNDRA') # Assume all particles in initial system are fixed self.system = init_sst if init_sst.particles: for p in init_sst.particles: p.is_fixed = True self.run_queue = [] def run(self): """pysimm.cassandra.Cassandra.run Method that triggers the simulations. Does two consecutive steps: **(1)** tries to write all files necessary for simulation (.dat, .inp, .mcf): **(2)** tries to invoke the CASSANDRA executable. """ global CASSANDRA_EXEC if check_cs_exec(): for task in self.run_queue: # Write .inp file task.write() # Write .xyz of the fixed system if provided if task.fxd_sst: if task.fxd_sst_mcfile is not None: McfWriter(task.fxd_sst, task.fxd_sst_mcfile).write('atoms') task.fxd_sst.write_xyz(task.fxd_sst_xyz) try: self.logger.info('Starting the GCMC simulations with CASSANDRA') print('{:.^60}'.format('')) p = Popen([CASSANDRA_EXEC, task.props_file], stdin=PIPE, stdout=PIPE, stderr=PIPE) stout, sterr = p.communicate() print(stout) print(sterr) task.upd_simulation() self.system = task.tot_sst.copy() except OSError as ose: self.logger.error('There was a problem calling CASSANDRA executable') exit(1) except IOError as ioe: if check_cs_exec(): self.logger.error('There was a problem running CASSANDRA. ' 'The process started but did not finish') exit(1) else: self.logger.error('There was a problem running CASSANDRA: seems it is not configured properly.\n' 'Please, be sure the CSNDRA_EXEC environment variable is set to the correct ' 'CASSANDRA executable path. The current path is set to:\n\n{}\n\n'.format(CASSANDRA_EXEC)) exit(1) def add_simulation(self, ens_type, obj=None, **kwargs): """pysimm.cassandra.Cassandra.add_simulation Method for adding new Monte Carlo simulation to the run queue. Args: ens_type: Type of the molecular ensemble for the Monte-Carlo simulations. The supported options are: `GCMC` (Grand Canonical); `NVT` (canonical); `NPT` (isobaric-isothermal) obj: the entity that should be added. Will be ignored if it is not of a type :class:`~MCSimulation` Keyword Args: is_new (boolean) : defines whether all previous simulations should be erased or not species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to :class:`~McSystem` constructor. Note: Other keyword arguments of this method will be redirected to the :class:`~McSystem` and :class:`~MCSimulation` constructors. See their descriptions for the possible keyword options. """ new_job = None # Reading the molecule ensemble type simul = locate('pysimm.cassandra.' + ens_type) if simul is None: self.logger.error('Unsopported simulation ensemble option. 
Please use ether GCMC, NPT, or ' 'NVT in \'add_simulation\' ') exit(1) if isinstance(obj, MCSimulation): new_job = obj else: specs = kwargs.get('species') if specs: mc_sst = McSystem(specs, **kwargs) new_job = simul(mc_sst, self.system, **kwargs) else: self.logger.error('Incorrect ' + ens_type + ' initialization. Please provide either Cassandra.' + ens_type + ' simulation object or the dictionary with initialization parameters ' 'of that object') exit(1) # Clean the run queue if 'is_new' set to to True if kwargs.get('is_new'): self.run_queue[:] = [] if new_job: new_job.__check_params__() self.run_queue.append(new_job) def add_gcmc(self, obj=None, **kwargs): """pysimm.cassandra.Cassandra.add_gcmc Ads new simulation in grand-canonical ensemble to the run queue. Args: obj: the entity that should be added. Will be ignored if it is not of a type :class:`~GCMC` Keyword Args: is_new (boolean) : defines whether all previous simulations should be erased or not species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to :class:`~McSystem` constructor. Note: Other keyword arguments of this method will be redirected to the :class:`~McSystem`, :class:`~MCSimulation`, and :class:`~GCMC` constructors. See their descriptions for the possible keyword options. """ new_job = None if isinstance(obj, GCMC): new_job = obj else: specs = kwargs.get('species') if specs: mc_sst = McSystem(specs, **kwargs) new_job = GCMC(mc_sst, self.system, **kwargs) else: self.logger.error('Unknown GCMC initialization. Please provide either ' 'the dictionary with GCMC parameters or Cassandra.GCMC simulation object') exit(1) if kwargs.get('is_new'): self.run_queue[:] = [] if new_job: new_job.__check_params__() self.run_queue.append(new_job) def add_npt_mc(self, obj=None, **kwargs): """pysimm.cassandra.Cassandra.add_npt_mc Ads new simulation in isobaric-isothermal ensemble to the run queue. Args: obj: the entity that should be added. Will be ignored if it is not of a type :class:`~NPT` Keyword Args: is_new (boolean) : defines whether all previous simulations should be erased or not species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to :class:`~McSystem` constructor. Note: Other keyword arguments of this method will be redirected to the :class:`~McSystem`, :class:`~MCSimulation`, and :class:`~NPT` constructors. See their descriptions for the possible keyword options. """ new_job = None if isinstance(obj, NPT): new_job = obj else: specs = kwargs.get('species') if specs: mc_sst = McSystem(specs, **kwargs) new_job = NPT(mc_sst, self.system, **kwargs) else: self.logger.error('Unknown NPT initialization. Please provide either ' 'the dictionary with NPT simulation parameters or Cassandra.NPT simulation object') exit(1) if kwargs.get('is_new'): self.run_queue[:] = [] if new_job: new_job.__check_params__() self.run_queue.append(new_job) def add_nvt(self, obj=None, **kwargs): """pysimm.cassandra.Cassandra.add_nvt Ads new simulation in canonical ensemble to the run queue. Args: obj: the entity that should be added. Will be ignored if it is not of a type :class:`~NVT` Keyword Args: is_new (boolean) : defines whether all previous simulations should be erased or not species (list of :class:`~pysimm.system.System`) : systems that describe molecules and will be passed to :class:`~McSystem` constructor. Note: Other keyword arguments of this method will be redirected to the :class:`~McSystem`, :class:`~MCSimulation`, and :class:`~NVT` constructors. 
See their descriptions for the possible keyword options. """ new_job = None if isinstance(obj, NVT): new_job = obj else: specs = kwargs.get('species') if specs: mc_sst = McSystem(specs, **kwargs) new_job = NVT(mc_sst, self.system, **kwargs) else: self.logger.error('Unknown NVT initialization. Please provide either ' 'the dictionary with NPT simulation parameters or Cassandra.NPT simulation object') exit(1) if kwargs.get('is_new'): self.run_queue[:] = [] if new_job: new_job.__check_params__() self.run_queue.append(new_job) def read_input(self, inp_file): """pysimm.cassandra.Cassandra.read_input The method parses the CASSANDRA instructions file (.inp) split it into separate instructions and analyses each according to the instruction name. Args: inp_file (str) : the full relative path of the file to be read Returns: dictionary : read CASSANDRA properties in the format required by :class:`~GCMC` """ result = {} if os.path.isfile(inp_file): self.logger.info('Reading simulation parameters from {:} file'.format(inp_file)) # Reading the cassandra .inp file as one long string inp_stream = open(inp_file, 'r') lines = inp_stream.read() raw_props = lines.split('#') for prop in raw_props: line = re.sub('\n!.*', '', prop) # Get rid of the CASSANDRA comments line = re.sub('\n(e|E)(n|N)(d|D)', '', line) # Get rid of the 'END in the end of the file tmp = line.split() if len(tmp) > 1: result[tmp[0]] = self.__parse_value__(tmp) # File seems fine let's close the stream and return true in the flag inp_stream.close() self.logger.info('Reading finished sucsessfully') else: self.logger.error('Cannot find specified file: \"{:}\"'.format(inp_file)) return result def __parse_value__(self, cells): title = cells[0].lower() if title == 'run_type': return OrderedDict([('type', cells[1]), ('steps', map(int, cells[2:]))]) elif title == 'charge_style': return OrderedDict([('type', cells[1]), ('sum_type', cells[2]), ('cut_val', float(cells[3])), ('accuracy', float(cells[4]))]) elif title == 'vdw_style': return OrderedDict([('type', cells[1]), ('cut_type', cells[2]), ('cut_val', float(cells[3]))]) elif title == 'simulation_length_info': tmp = OrderedDict([('units', cells[2]), ('prop_freq', int(cells[4])), ('coord_freq', int(cells[6])), ('run', int(cells[8]))]) if len(cells) > 10: tmp['steps_per_sweep'] = int(cells[10]) if len(cells) > 12: tmp['block_averages'] = int(cells[12]) return tmp elif title == 'cbmc_info': return OrderedDict([('kappa_ins', int(cells[2])), ('kappa_dih', int(cells[4])), ('rcut_cbmc', float(cells[6]))]) elif title == 'box_info': size = float(cells[3]) if len(cells) > 6: size = [float(cells[3]), float(cells[4]), float(cells[5])] return OrderedDict([('box_count', int(cells[1])), ('box_type', cells[2]), ('box_size', size)]) elif title == 'prob_translation': vals = [] for i in range(2, len(cells)): vals.append(float(cells[i])) return OrderedDict([('tot_prob', float(cells[1])), ('limit_vals', vals)]) elif title == 'prob_insertion': vals = [] for i in range(2, len(cells)): vals.append(cells[i]) return OrderedDict([('tot_prob', float(cells[1])), ('types', vals)]) elif title == 'prob_rotation': vals = [] for i in range(2, len(cells)): vals.append(float(cells[i])) return OrderedDict([('tot_prob', float(cells[1])), ('limit_vals', vals)]) elif (title == 'molecule_files') or (title == 'fragment_files'): tmp = OrderedDict() for i, c in zip(range(1, len(cells) - 1, 2), range(1, 1 + len(cells) / 2)): tmp['file' + str(c)] = [cells[i], int(cells[i + 1])] return tmp elif title == 'start_type': if cells[1] == 
'read_config': specs = [] for i in range(2, len(cells) - 1): specs.append(int(cells[i])) return OrderedDict([('start_type', 'read_config'), ('species', specs), ('file_name', cells[-1])]) if cells[1] == 'make_config': specs = [] for i in range(2, len(cells)): specs.append(int(cells[i])) return OrderedDict([('start_type', 'make_config'), ('species', specs), ('file_name', '')]) if cells[1] == 'add to config': self.logger.error('Sorry, \'add to config\' regime of ''Start_Type option is not supported yet') exit(1) if cells[1] == 'checkpoint': self.logger.error('Sorry, \'checkpoint\' regime of ''Start_Type option is not supported yet ') exit(1) elif title == 'property_info': if int(cells[1]) == 1: tmp = OrderedDict() for i in range(2, len(cells)): tmp['prop' + str(i - 1)] = str.lower(cells[i]) return tmp elif title == 'seed_info': return [int(cells[1]), int(cells[2])] elif (title == 'prob_deletion') or (title == 'rcutoff_low') or \ (title == 'bond_prob_cutoff') or (title == 'chemical_potential_info'): return float(cells[1]) elif (title == 'average_Info') or (title == 'nbr_species') or (title == 'temperature_info'): return int(cells[1]) else: return cells[1] def unwrap_gas(self): """pysimm.cassandra.Cassandra.unwrap_gas Ensures that all particles that are not fixed are unwrapped, otherwise CASSANDRA might not interpret them correctly """ gas_system = self.system.copy() for p in gas_system.particles: if p.is_fixed: gas_system.particles.remove(p.tag, update=False) else: self.system.particles.remove(p.tag, update=False) for m in gas_system.molecules: if any([t.is_fixed for t in m.particles]): gas_system.molecules.remove(m.tag, update=False) else: self.system.molecules.remove(m.tag, update=False) gas_system.remove_spare_bonding() self.system.remove_spare_bonding() gas_system.unwrap() self.system.add(gas_system, change_dim=False) class McfWriter(object): """pysimm.cassandra.McfWriter Object responsible for creating the CASSANDRA Molecular Configuration file (.mcf). Attributes: syst (:class:`~pysimm.system.System`) :represents the molecule to be described file_ref (str) : full relative path to the file that will be created """ # Section names in any .mcf file mcf_tags = ['# Atom_Info', '# Bond_Info', '# Angle_Info', '# Dihedral_Info', '# Improper_Info', '# Intra_Scaling', '# Fragment_Info', '# Fragment_Connectivity'] empty_line = '0' def __init__(self, syst, file_ref): self.syst = syst self.file_ref = file_ref self.logger = logging.getLogger('MCF Writer') def write(self, typing='all'): """pysimm.cassandra.McfWriter.write Method creates the .mcf file writing only those sections of it that are marked to be written Args: typing (list) : the list of sections to be written or the text keyword. 
List items should be as they are defined in :class:`~pysimm.cassandra.McfWriter.mcf_tags` field); default 'all' """ # Initializing output stream with open(self.file_ref, 'w') as out_stream: for (name, is_write) in zip(self.mcf_tags, self.__to_tags__(typing)): if is_write: try: method = getattr(self, '__write_' + str.lower(name[2:]) + '__') method(out_stream) except AttributeError: self.__write_empty__(out_stream, name) else: self.__write_empty__(out_stream, name) out_stream.write('\nEND') out_stream.close() def __write_empty__(self, out, name): out.write('{0:}\n{1:}\n\n'.format(name, self.empty_line)) def __write_atom_info__(self, out): global KCALMOL_2_K text_tag = '# Atom_Info' if self.syst.particles.count > 0: # writing section header out.write('{:}\n'.format(text_tag)) # Verify and fix net system charge self.syst.zero_charge() # writing total number of particles out.write('{0:<6}\n'.format(self.syst.particles.count)) count = 0 line_template = '{l[0]:<6}{l[1]:<7}{l[2]:<5}{l[3]:<8.3f}{l[4]:<10.6f}' \ '{l[5]:<6}{l[6]:<11.3f}{l[7]:<9.3f}\n' warn_flag = False for item in self.syst.particles: line = [count + 1, '', '', 0, 0, 'LJ', 0, 0] if item.charge: line[4] = item.charge if item.type: line[1] = item.type.tag line[2] = item.type.tag if item.type.name: line[1] = item.type.name line[2] = item.type.elem else: warn_flag = True if item.type.mass: line[3] = item.type.mass if item.type.epsilon: line[6] = KCALMOL_2_K * item.type.epsilon if item.type.sigma: line[7] = item.type.sigma else: continue out.write(line_template.format(l=line)) count += 1 if warn_flag: self.logger.warning('Some particle type names (and/or element names) inside the system are not defined.' ' Will use type identifiers instead') else: self.__write_empty__(out, text_tag) out.write('\n') def __write_bond_info__(self, out): text_tag = '# Bond_Info' if self.syst.bonds.count > 0: # writing section header out.write('{:}\n'.format(text_tag)) # writing total number of bonds out.write('{0:<6}\n'.format(self.syst.bonds.count)) line_template = '{l[0]:<6d}{l[1]:<6d}{l[2]:<6d}{l[3]:<9}{l[4]:<6.3f}\n' count = 1 for bond in self.syst.bonds: tmp = 'fixed' # Fixed bond is the only option for CASSANDRA V-1.2 line = [count, bond.a.tag, bond.b.tag, tmp, bond.type.r0] count += 1 out.write(line_template.format(l=line)) out.write('\n') else: self.__write_empty__(out, text_tag) def __write_angle_info__(self, out): text_tag = '# Angle_Info' if self.syst.angles.count > 0: # writing section header out.write('{:}\n'.format(text_tag)) # writing total number of angles out.write('{0:<6}\n'.format(self.syst.angles.count)) count = 1 for angle in self.syst.angles: line_template = '{l[0]:<6d}{l[1]:<6d}{l[2]:<6d}{l[3]:<6d}{l[4]:<10}{l[5]:<13.3f}' line = [count, angle.a.tag, angle.b.tag, angle.c.tag] if hasattr(angle.type, 'is_fixed') and angle.type.is_fixed: addon = ['fixed', angle.type.theta0] else: addon = ['harmonic', KCALMOL_2_K * angle.type.k, angle.type.theta0] line_template += '{l[6]:<13.3f}' count += 1 out.write(line_template.format(l=line + addon) + '\n') out.write('\n') else: self.__write_empty__(out, text_tag) def __write_intra_scaling__(self, out): format_line = '{:<6.2f}{:<6.2f}{:<6.2f}{:<6.2f}' # writing section header out.write('{:}\n'.format('# Intra_Scaling')) # writing vdW scaling: 1-2 1-3 1-4 1-N out.write(format_line.format(0, 0, 0, 0) + '\n') # writing charge scaling: 1-2 1-3 1-4 1-N out.write(format_line.format(0, 0, 0, 0) + '\n\n') def __write_dihedral_info__(self, out): text_tag = '# Dihedral_Info' self.__write_empty__(out, text_tag) def 
__write_improper_info__(self, out):
        text_tag = '# Improper_Info'
        self.__write_empty__(out, text_tag)

    def __write_fragment_info__(self, out):
        # writing section header
        out.write('{:}\n'.format('# Fragment_Info'))
        # writing indexing
        out.write('{:}\n'.format(1))
        n = len(self.syst.particles)
        out.write(' '.join('{}'.format(item) for item in [1, n] + range(1, n + 1)))
        out.write('\n\n')

    def __write_fragment_connectivity__(self, out):
        text_tag = '# Fragment_Connectivity'
        self.__write_empty__(out, text_tag)

    def __to_tags__(self, inpt):
        n = len(self.mcf_tags)
        idxs = [True] * n
        if inpt.lower() == 'atoms':
            idxs = [False] * n
            idxs[self.mcf_tags.index('# Atom_Info')] = True
            idxs[self.mcf_tags.index('# Intra_Scaling')] = True
        return idxs


def check_cs_exec():
    """pysimm.cassandra.check_cs_exec

    Validates that the absolute path to the CASSANDRA executable is set in the `CASSANDRA_EXEC`
    environment variable of the OS. The validation is called once inside the :class:`~Cassandra.run` method.
    """
    global CASSANDRA_EXEC
    flag = True
    if CASSANDRA_EXEC is None:
        print('Please specify the OS environment variable CASSANDRA_EXEC that points to the '
              'CASSANDRA compiled binary file, which is by default cassandra_{compiler-name}[_openMP].exe')
        flag = False
    return flag


def make_iterable(obj):
    """pysimm.cassandra.make_iterable

    Utility function that forces its argument to be iterable (wraps it in a list when a single item is given)
    """
    it_obj = obj
    if not isinstance(obj, Iterable):
        it_obj = [obj]
    return it_obj
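# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original module).
# It assumes the CASSANDRA_EXEC environment variable points to a compiled
# CASSANDRA binary and that pysimm's system.read_lammps() is available;
# 'matrix.lmps', 'co2.lmps' and all numeric settings below are hypothetical
# placeholders to be replaced with values appropriate for your system.
if __name__ == '__main__':
    frame = system.read_lammps('matrix.lmps')  # framework; its particles become is_fixed=True
    gas = system.read_lammps('co2.lmps')       # single molecule of the specie to be inserted
    cs = Cassandra(frame)
    cs.add_gcmc(species=gas, max_ins=2000, chem_pot=-27.5,
                out_folder='gcmc_results', Run_Name='co2_gcmc',
                Simulation_Length_Info={'run': 100000})
    cs.run()  # writes the .inp/.mcf/.dat files and invokes the CASSANDRA executable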
mit
-1,943,103,293,411,142,400
45.063699
123
0.549686
false
Changron/NTHUOJ_web
nthuoj/settings.py
1
4448
#-*- encoding=UTF-8 -*- """ Django settings for nthuoj project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os from utils.config_info import get_config BASE_DIR = os.path.dirname(os.path.dirname(__file__)) PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."),) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'kivl1x)by8$98z6y3b^7texw&+d1arad2qlq-(sn=8g^lw_(+&' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False TEMPLATE_DEBUG = False ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = ( 'autocomplete_light', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'utils', 'problem', 'index', 'contest', 'users', 'team', 'group', 'status', 'axes', 'bootstrapform', 'djangobower', 'datetimewidget', 'ckeditor', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'utils.render_helper.CustomHttpExceptionMiddleware', 'axes.middleware.FailedLoginMiddleware', ) ROOT_URLCONF = 'nthuoj.urls' WSGI_APPLICATION = 'nthuoj.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases CONFIG_PATH = os.path.join(BASE_DIR, 'nthuoj/config/nthuoj.cfg') DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'OPTIONS': { 'read_default_file': CONFIG_PATH, }, } } # Custom User auth AUTH_USER_MODEL = 'users.User' # where @login_required will redirect to LOGIN_URL = '/users/login/' # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Taipei' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static') STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') MEDIA_URL = '/media/' # django-axes 1.3.8 configurations # https://pypi.python.org/pypi/django-axes/ # redirect to broken page when exceed wrong-try limits AXES_LOCKOUT_URL = '/users/block_wrong_tries' # freeze login access for that ip for 0.1*60 = 6 minites AXES_COOLOFF_TIME = 0.1 EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = get_config('email', 'user') EMAIL_HOST_PASSWORD = get_config('email', 'password') EMAIL_PORT = 587 # django-ckeditor configurations CKEDITOR_UPLOAD_PATH = 'uploads/' CKEDITOR_IMAGE_BACKEND = 'pillow' CKEDITOR_CONFIGS = { 'default': { 'toolbar': 'full', }, } # django-bower settings BOWER_COMPONENTS_ROOT = os.path.join(PROJECT_ROOT, 'components') BOWER_INSTALLED_APPS = ( 'Chart.js', 'jquery', 'jquery-ui#1.9.2', 'https://github.com/thomaspark/bootswatch.git', # bootswatch 'https://github.com/dimsemenov/Magnific-Popup.git', # Magnific-Popup 'https://github.com/codemirror/CodeMirror.git', # CodeMirror 
    'http://gregpike.net/demos/bootstrap-file-input/bootstrap.file-input.js',  # bootstrap fileinput
    'https://github.com/lou/multi-select.git',  # multiselect
    'https://github.com/riklomas/quicksearch.git',  # quicksearch
    'https://gantry.googlecode.com/svn/trunk/root/js/jquery.url.min.js',  # jquery url plugin
)

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'djangobower.finders.BowerFinder',
)

# Maximum number of public users for a single contest
MAX_PUBLIC_USER = 200

# Public-user username prefix
PUBLIC_USER_PREFIX = "TEAM"

# Default password assigned to public users
PUBLIC_USER_DEFAULT_PASSWORD = "000"
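# ---------------------------------------------------------------------------
# Sketch of the expected nthuoj/config/nthuoj.cfg layout (editor's addition;
# all values are placeholders). The [client] section follows the MySQL
# option-file format consumed by 'read_default_file' above; the [email]
# section is read through utils.config_info.get_config('email', ...). Key
# names other than those referenced above are assumptions.
#
# [client]
# database = nthuoj
# user = ojuser
# password = change-me
# default-character-set = utf8
#
# [email]
# user = your-account@gmail.com
# password = your-app-password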
mit
-6,440,279,533,526,277,000
25.795181
99
0.704586
false
chop-dbhi/varify-data-warehouse
vdw/samples/migrations/0008_force_migrate_default_cohort_and_project.py
1
23232
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.db import models from vdw.samples.models import DEFAULT_COHORT_NAME, DEFAULT_PROJECT_NAME class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." Project = orm['samples.Project'] Cohort = orm['samples.Cohort'] now = datetime.datetime.now() # Create default project try: project = Project.objects.get(name=DEFAULT_PROJECT_NAME) except Project.DoesNotExist: project = Project(name=DEFAULT_PROJECT_NAME, label=DEFAULT_PROJECT_NAME, created=now, modified=now) project.save() # Create default cohort try: cohort = Cohort.objects.get(name=DEFAULT_COHORT_NAME) except Cohort.DoesNotExist: cohort = Cohort(name=DEFAULT_COHORT_NAME, published=True, autocreated=True, created=now, modified=now) cohort.save() def backwards(self, orm): "Write your backwards methods here." # There is not guarantee these objects did not already exist # so these should not be deleted models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 27, 16, 57, 27, 697343)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 27, 16, 57, 27, 697128)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'avocado.datacontext': { 'Meta': {'object_name': 'DataContext'}, 'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'composite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'count': ('django.db.models.fields.IntegerField', [], 
{'null': 'True', 'db_column': "'_count'"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'json': ('jsonfield.fields.JSONField', [], {'default': '{}', 'null': 'True', 'blank': 'True'}), 'keywords': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'session': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datacontext+'", 'null': 'True', 'to': "orm['auth.User']"}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'genome.chromosome': { 'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"}, 'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}) }, 'genome.genome': { 'Meta': {'object_name': 'Genome', 'db_table': "'genome'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'released': ('django.db.models.fields.DateField', [], {'null': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'genome.genotype': { 'Meta': {'object_name': 'Genotype', 'db_table': "'genotype'"}, 'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '3'}) }, 'literature.pubmed': { 'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"}, 'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}) }, 'phenotypes.phenotype': { 'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"}, 'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'symmetrical': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'hpo_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1000'}) }, 'samples.batch': { 'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'Batch', 'db_table': "'batch'"}, 'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'investigator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'to': "orm['samples.Project']"}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'samples.cohort': { 'Meta': {'object_name': 'Cohort', 'db_table': "'cohort'"}, 'autocreated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'context': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['avocado.DataContext']", 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Project']", 'null': 'True', 'blank': 'True'}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'samples': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Sample']", 'through': "orm['samples.CohortSample']", 'symmetrical': 'False'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'samples.cohortsample': { 'Meta': {'unique_together': "(('object_set', 'set_object'),)", 'object_name': 'CohortSample', 'db_table': "'cohort_sample'"}, 'added': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Cohort']", 'db_column': "'cohort_id'"}), 'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'set_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']", 'db_column': "'sample_id'"}) }, 'samples.cohortvariant': { 'Meta': {'unique_together': "(('variant', 'cohort'),)", 'object_name': 'CohortVariant', 'db_table': "'cohort_variant'"}, 'af': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_index': 'True'}), 'cohort': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Cohort']"}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"}) }, 'samples.person': { 'Meta': {'object_name': 'Person', 'db_table': "'person'"}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'mrn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'proband': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['samples.Person']", 'through': "orm['samples.Relation']", 'symmetrical': 'False'}), 'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}) }, 'samples.project': { 'Meta': {'unique_together': "(('name',),)", 'object_name': 'Project', 'db_table': "'project'"}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'samples.relation': { 'Meta': {'ordering': "('person', '-generation')", 'object_name': 'Relation', 'db_table': "'relation'"}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'generation': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'family'", 'to': "orm['samples.Person']"}), 'relative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relative_of'", 'to': "orm['samples.Person']"}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}) }, 'samples.result': { 'Meta': {'unique_together': "(('sample', 'variant'),)", 'object_name': 'Result', 'db_table': "'sample_result'"}, 'baseq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'coverage_alt': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'coverage_ref': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'downsampling': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'fisher_strand': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'genotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genotype']", 'null': 'True', 'blank': 'True'}), 'genotype_quality': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'haplotype_score': 
('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'homopolymer_run': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_dbsnp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'mq': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'mq0': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'mq_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'phred_scaled_likelihood': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'quality': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'quality_by_depth': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'read_depth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'read_pos_rank_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']"}), 'spanning_deletions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'strand_bias': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"}) }, 'samples.sample': { 'Meta': {'unique_together': "(('batch', 'name'),)", 'object_name': 'Sample', 'db_table': "'sample'"}, 'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'samples'", 'to': "orm['samples.Batch']"}), 'bio_sample': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'md5': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'samples'", 'null': 'True', 'to': "orm['samples.Person']"}), 'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'version': ('django.db.models.fields.IntegerField', [], {}) }, 'samples.samplerun': { 'Meta': {'object_name': 'SampleRun', 'db_table': "'sample_run'"}, 'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'genome': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Genome']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': 
('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'sample': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Sample']"}) }, 'variants.variant': { 'Meta': {'unique_together': "(('chr', 'pos', 'ref', 'alt'),)", 'object_name': 'Variant', 'db_table': "'variant'"}, 'alt': ('django.db.models.fields.TextField', [], {'db_index': 'True'}), 'articles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['literature.PubMed']", 'db_table': "'variant_pubmed'", 'symmetrical': 'False'}), 'chr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['genome.Chromosome']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'liftover': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'md5': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'phenotypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['phenotypes.Phenotype']", 'through': "orm['variants.VariantPhenotype']", 'symmetrical': 'False'}), 'pos': ('django.db.models.fields.IntegerField', [], {}), 'ref': ('django.db.models.fields.TextField', [], {'db_index': 'True'}), 'rsid': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.VariantType']", 'null': 'True'}) }, 'variants.variantphenotype': { 'Meta': {'object_name': 'VariantPhenotype', 'db_table': "'variant_phenotype'"}, 'hgmd_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phenotype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['phenotypes.Phenotype']"}), 'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['variants.Variant']"}) }, 'variants.varianttype': { 'Meta': {'ordering': "['order']", 'object_name': 'VariantType', 'db_table': "'variant_type'"}, 'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '20'}) } } complete_apps = ['samples']
bsd-2-clause
-32,546,773,490,712,990
78.835052
192
0.544594
false
kingtaurus/cs224d
assignment1/q3_word2vec_sol.py
1
7778
import numpy as np
import random

from q1_softmax_sol import softmax_sol as softmax
from q2_gradcheck import gradcheck_naive
from q2_sigmoid_sol import sigmoid_sol as sigmoid
from q2_sigmoid_sol import sigmoid_grad_sol as sigmoid_grad


def normalizeRows_sol(x):
    """ Row normalization function """
    # Implement a function that normalizes each row of a matrix to have unit length

    ### YOUR CODE HERE
    N = x.shape[0]
    x /= np.sqrt(np.sum(x**2, axis=1)).reshape((N,1)) + 1e-30
    ### END YOUR CODE

    return x


def softmaxCostAndGradient_sol(predicted, target, outputVectors, dataset):
    """ Softmax cost function for word2vec models """

    # Implement the cost and gradients for one predicted word vector
    # and one target word vector as a building block for word2vec
    # models, assuming the softmax prediction function and cross
    # entropy loss.

    # Inputs:
    # - predicted: numpy ndarray, predicted word vector (\hat{v} in
    #   the written component or \hat{r} in an earlier version)
    # - target: integer, the index of the target word
    # - outputVectors: "output" vectors (as rows) for all tokens
    # - dataset: needed for negative sampling, unused here.

    # Outputs:
    # - cost: cross entropy cost for the softmax word prediction
    # - gradPred: the gradient with respect to the predicted word
    #   vector
    # - grad: the gradient with respect to all the other word
    #   vectors

    # We will not provide starter code for this function, but feel
    # free to reference the code you previously wrote for this
    # assignment!

    ### YOUR CODE HERE
    probabilities = softmax(predicted.dot(outputVectors.T))
    cost = -np.log(probabilities[target])

    delta = probabilities
    delta[target] -= 1

    N = delta.shape[0]
    D = predicted.shape[0]
    grad = delta.reshape((N,1)) * predicted.reshape((1,D))
    gradPred = (delta.reshape((1,N)).dot(outputVectors)).flatten()
    ### END YOUR CODE

    return cost, gradPred, grad


def negSamplingCostAndGradient_sol(predicted, target, outputVectors, dataset, K=10):
    """ Negative sampling cost function for word2vec models """

    # Implement the cost and gradients for one predicted word vector
    # and one target word vector as a building block for word2vec
    # models, using the negative sampling technique. K is the sample
    # size. You might want to use dataset.sampleTokenIdx() to sample
    # a random word index.
    #
    # Note: See test_word2vec below for dataset's initialization.
    #
    # Input/Output Specifications: same as softmaxCostAndGradient
    # We will not provide starter code for this function, but feel
    # free to reference the code you previously wrote for this
    # assignment!

    ### YOUR CODE HERE
    grad = np.zeros(outputVectors.shape)
    gradPred = np.zeros(predicted.shape)

    indices = [target]
    for k in range(K):
        newidx = dataset.sampleTokenIdx()
        while newidx == target:
            newidx = dataset.sampleTokenIdx()
        indices += [newidx]

    labels = np.array([1] + [-1 for k in range(K)])
    vecs = outputVectors[indices,:]

    t = sigmoid(vecs.dot(predicted) * labels)
    cost = -np.sum(np.log(t))

    delta = labels * (t - 1)
    gradPred = delta.reshape((1,K+1)).dot(vecs).flatten()
    gradtemp = delta.reshape((K+1,1)).dot(predicted.reshape(
        (1,predicted.shape[0])))
    for k in range(K+1):
        grad[indices[k]] += gradtemp[k,:]

    # t = sigmoid(predicted.dot(outputVectors[target,:]))
    # cost = -np.log(t)
    # delta = t - 1
    # gradPred += delta * outputVectors[target, :]
    # grad[target, :] += delta * predicted
    # for k in range(K):
    #     idx = dataset.sampleTokenIdx()
    #     t = sigmoid(-predicted.dot(outputVectors[idx,:]))
    #     cost += -np.log(t)
    #     delta = 1 - t
    #     gradPred += delta * outputVectors[idx, :]
    #     grad[idx, :] += delta * predicted
    ### END YOUR CODE

    return cost, gradPred, grad


def skipgram_sol(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
    dataset, word2vecCostAndGradient = softmaxCostAndGradient_sol):
    """ Skip-gram model in word2vec """

    # Implement the skip-gram model in this function.

    # Inputs:
    # - currentWord: a string of the current center word
    # - C: integer, context size
    # - contextWords: list of no more than 2*C strings, the context words
    # - tokens: a dictionary that maps words to their indices in
    #   the word vector list
    # - inputVectors: "input" word vectors (as rows) for all tokens
    # - outputVectors: "output" word vectors (as rows) for all tokens
    # - word2vecCostAndGradient: the cost and gradient function for
    #   a prediction vector given the target word vectors,
    #   could be one of the two cost functions you
    #   implemented above

    # Outputs:
    # - cost: the cost function value for the skip-gram model
    # - grad: the gradient with respect to the word vectors

    # We will not provide starter code for this function, but feel
    # free to reference the code you previously wrote for this
    # assignment!

    ### YOUR CODE HERE
    currentI = tokens[currentWord]
    predicted = inputVectors[currentI, :]

    cost = 0.0
    gradIn = np.zeros(inputVectors.shape)
    gradOut = np.zeros(outputVectors.shape)
    for cwd in contextWords:
        idx = tokens[cwd]
        cc, gp, gg = word2vecCostAndGradient(predicted, idx, outputVectors, dataset)
        cost += cc
        gradOut += gg
        gradIn[currentI, :] += gp
    ### END YOUR CODE

    return cost, gradIn, gradOut


def cbow_sol(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
    dataset, word2vecCostAndGradient = softmaxCostAndGradient_sol):
    """ CBOW model in word2vec """

    # Implement the continuous bag-of-words model in this function.

    # Input/Output specifications: same as the skip-gram model

    # We will not provide starter code for this function, but feel
    # free to reference the code you previously wrote for this
    # assignment!

    #################################################################
    # IMPLEMENTING CBOW IS EXTRA CREDIT, DERIVATIONS IN THE WRITTEN #
    # ASSIGNMENT ARE NOT!                                           #
    #################################################################

    cost = 0
    gradIn = np.zeros(inputVectors.shape)
    gradOut = np.zeros(outputVectors.shape)

    ### YOUR CODE HERE
    D = inputVectors.shape[1]
    predicted = np.zeros((D,))

    indices = [tokens[cwd] for cwd in contextWords]
    for idx in indices:
        predicted += inputVectors[idx, :]

    cost, gp, gradOut = word2vecCostAndGradient(predicted, tokens[currentWord],
        outputVectors, dataset)
    gradIn = np.zeros(inputVectors.shape)
    for idx in indices:
        gradIn[idx, :] += gp
    ### END YOUR CODE

    return cost, gradIn, gradOut
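# -----------------------------------------------------------------------
# Hedged usage sketch (not from the original file): the module imports
# gradcheck_naive above, so one way to sanity-check skipgram_sol with the
# softmax cost is a small random gradient check. The dummy dataset, token
# map, and vector sizes below are illustrative assumptions only.
if __name__ == "__main__":
    random.seed(31415)
    np.random.seed(9265)

    class _DummyDataset(object):
        # Only sampleTokenIdx is needed; the softmax cost ignores it.
        def sampleTokenIdx(self):
            return random.randint(0, 4)

    tokens = dict([("a", 0), ("b", 1), ("c", 2), ("d", 3), ("e", 4)])
    dataset = _DummyDataset()
    vectors = normalizeRows_sol(np.random.randn(10, 3))

    def _check(vec):
        # First five rows act as input vectors, last five as output vectors.
        inputVectors, outputVectors = vec[:5, :], vec[5:, :]
        cost, gradIn, gradOut = skipgram_sol(
            "c", 1, ["a", "b"], tokens, inputVectors, outputVectors, dataset)
        return cost, np.concatenate((gradIn, gradOut), axis=0)

    gradcheck_naive(_check, vectors)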
mit
4,524,440,371,980,069,400
43.193182
120
0.576369
false
taxpon/sverchok
ui/sv_icons.py
1
1302
import bpy
import os
import glob

import bpy.utils.previews

# custom icons dictionary
_icon_collection = {}


def custom_icon(name):
    load_custom_icons()  # load the custom icons in case they are not already loaded

    custom_icons = _icon_collection["main"]

    default = lambda: None  # if there is no icon with the given name, return zero
    default.icon_id = 0

    return custom_icons.get(name, default).icon_id


def load_custom_icons():
    if len(_icon_collection):  # return if the custom icons are already loaded
        return

    custom_icons = bpy.utils.previews.new()

    iconsDir = os.path.join(os.path.dirname(__file__), "icons")
    iconPattern = "sv_*.png"
    iconPath = os.path.join(iconsDir, iconPattern)
    iconFiles = [os.path.basename(x) for x in glob.glob(iconPath)]

    for iconFile in iconFiles:
        iconName = os.path.splitext(iconFile)[0]
        iconID = iconName.upper()
        custom_icons.load(iconID, os.path.join(iconsDir, iconFile), "IMAGE")

    _icon_collection["main"] = custom_icons


def remove_custom_icons():
    for custom_icons in _icon_collection.values():
        bpy.utils.previews.remove(custom_icons)
    _icon_collection.clear()


def register():
    load_custom_icons()


def unregister():
    remove_custom_icons()


if __name__ == '__main__':
    register()
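# Hedged usage sketch (not from the original file): UI draw code elsewhere in
# the add-on could reference an icon loaded above by its upper-cased file name;
# "SV_ALPHA" below is an assumed example name for an icons/sv_alpha.png file.
#
#     layout.label(text="Alpha", icon_value=custom_icon("SV_ALPHA"))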
gpl-3.0
1,090,800,781,951,556,500
23.111111
76
0.667435
false
rzr/synapse
synapse/handlers/presence.py
1
46696
# -*- coding: utf-8 -*- # Copyright 2014, 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from twisted.internet import defer from synapse.api.errors import SynapseError, AuthError from synapse.api.constants import PresenceState from synapse.util.logcontext import PreserveLoggingContext from synapse.util.logutils import log_function from synapse.types import UserID import synapse.metrics from ._base import BaseHandler import logging logger = logging.getLogger(__name__) metrics = synapse.metrics.get_metrics_for(__name__) # Don't bother bumping "last active" time if it differs by less than 60 seconds LAST_ACTIVE_GRANULARITY = 60*1000 # Keep no more than this number of offline serial revisions MAX_OFFLINE_SERIALS = 1000 # TODO(paul): Maybe there's one of these I can steal from somewhere def partition(l, func): """Partition the list by the result of func applied to each element.""" ret = {} for x in l: key = func(x) if key not in ret: ret[key] = [] ret[key].append(x) return ret def partitionbool(l, func): def boolfunc(x): return bool(func(x)) ret = partition(l, boolfunc) return ret.get(True, []), ret.get(False, []) class PresenceHandler(BaseHandler): STATE_LEVELS = { PresenceState.OFFLINE: 0, PresenceState.UNAVAILABLE: 1, PresenceState.ONLINE: 2, PresenceState.FREE_FOR_CHAT: 3, } def __init__(self, hs): super(PresenceHandler, self).__init__(hs) self.homeserver = hs self.clock = hs.get_clock() distributor = hs.get_distributor() distributor.observe("registered_user", self.registered_user) distributor.observe( "started_user_eventstream", self.started_user_eventstream ) distributor.observe( "stopped_user_eventstream", self.stopped_user_eventstream ) distributor.observe("user_joined_room", self.user_joined_room) distributor.declare("collect_presencelike_data") distributor.declare("changed_presencelike_data") distributor.observe( "changed_presencelike_data", self.changed_presencelike_data ) # outbound signal from the presence module to advertise when a user's # presence has changed distributor.declare("user_presence_changed") self.distributor = distributor self.federation = hs.get_replication_layer() self.federation.register_edu_handler( "m.presence", self.incoming_presence ) self.federation.register_edu_handler( "m.presence_invite", lambda origin, content: self.invite_presence( observed_user=UserID.from_string(content["observed_user"]), observer_user=UserID.from_string(content["observer_user"]), ) ) self.federation.register_edu_handler( "m.presence_accept", lambda origin, content: self.accept_presence( observed_user=UserID.from_string(content["observed_user"]), observer_user=UserID.from_string(content["observer_user"]), ) ) self.federation.register_edu_handler( "m.presence_deny", lambda origin, content: self.deny_presence( observed_user=UserID.from_string(content["observed_user"]), observer_user=UserID.from_string(content["observer_user"]), ) ) # IN-MEMORY store, mapping local userparts to sets of local users to # be informed of state changes. 
self._local_pushmap = {} # map local users to sets of remote /domain names/ who are interested # in them self._remote_sendmap = {} # map remote users to sets of local users who're interested in them self._remote_recvmap = {} # list of (serial, set of(userids)) tuples, ordered by serial, latest # first self._remote_offline_serials = [] # map any user to a UserPresenceCache self._user_cachemap = {} self._user_cachemap_latest_serial = 0 # map room_ids to the latest presence serial for a member of that # room self._room_serials = {} metrics.register_callback( "userCachemap:size", lambda: len(self._user_cachemap), ) def _get_or_make_usercache(self, user): """If the cache entry doesn't exist, initialise a new one.""" if user not in self._user_cachemap: self._user_cachemap[user] = UserPresenceCache() return self._user_cachemap[user] def _get_or_offline_usercache(self, user): """If the cache entry doesn't exist, return an OFFLINE one but do not store it into the cache.""" if user in self._user_cachemap: return self._user_cachemap[user] else: return UserPresenceCache() def registered_user(self, user): return self.store.create_presence(user.localpart) @defer.inlineCallbacks def is_presence_visible(self, observer_user, observed_user): assert(self.hs.is_mine(observed_user)) if observer_user == observed_user: defer.returnValue(True) if (yield self.store.user_rooms_intersect( [u.to_string() for u in observer_user, observed_user])): defer.returnValue(True) if (yield self.store.is_presence_visible( observed_localpart=observed_user.localpart, observer_userid=observer_user.to_string())): defer.returnValue(True) defer.returnValue(False) @defer.inlineCallbacks def get_state(self, target_user, auth_user, as_event=False, check_auth=True): """Get the current presence state of the given user. Args: target_user (UserID): The user whose presence we want auth_user (UserID): The user requesting the presence, used for checking if said user is allowed to see the persence of the `target_user` as_event (bool): Format the return as an event or not? check_auth (bool): Perform the auth checks or not? Returns: dict: The presence state of the `target_user`, whose format depends on the `as_event` argument. 
""" if self.hs.is_mine(target_user): if check_auth: visible = yield self.is_presence_visible( observer_user=auth_user, observed_user=target_user ) if not visible: raise SynapseError(404, "Presence information not visible") if target_user in self._user_cachemap: state = self._user_cachemap[target_user].get_state() else: state = yield self.store.get_presence_state(target_user.localpart) if "mtime" in state: del state["mtime"] state["presence"] = state.pop("state") else: # TODO(paul): Have remote server send us permissions set state = self._get_or_offline_usercache(target_user).get_state() if "last_active" in state: state["last_active_ago"] = int( self.clock.time_msec() - state.pop("last_active") ) if as_event: content = state content["user_id"] = target_user.to_string() if "last_active" in content: content["last_active_ago"] = int( self._clock.time_msec() - content.pop("last_active") ) defer.returnValue({"type": "m.presence", "content": content}) else: defer.returnValue(state) @defer.inlineCallbacks def get_states(self, target_users, auth_user, as_event=False, check_auth=True): """A batched version of the `get_state` method that accepts a list of `target_users` Args: target_users (list): The list of UserID's whose presence we want auth_user (UserID): The user requesting the presence, used for checking if said user is allowed to see the persence of the `target_users` as_event (bool): Format the return as an event or not? check_auth (bool): Perform the auth checks or not? Returns: dict: A mapping from user -> presence_state """ local_users, remote_users = partitionbool( target_users, lambda u: self.hs.is_mine(u) ) if check_auth: for user in local_users: visible = yield self.is_presence_visible( observer_user=auth_user, observed_user=user ) if not visible: raise SynapseError(404, "Presence information not visible") results = {} if local_users: for user in local_users: if user in self._user_cachemap: results[user] = self._user_cachemap[user].get_state() local_to_user = {u.localpart: u for u in local_users} states = yield self.store.get_presence_states( [u.localpart for u in local_users if u not in results] ) for local_part, state in states.items(): if state is None: continue res = {"presence": state["state"]} if "status_msg" in state and state["status_msg"]: res["status_msg"] = state["status_msg"] results[local_to_user[local_part]] = res for user in remote_users: # TODO(paul): Have remote server send us permissions set results[user] = self._get_or_offline_usercache(user).get_state() for state in results.values(): if "last_active" in state: state["last_active_ago"] = int( self.clock.time_msec() - state.pop("last_active") ) if as_event: for user, state in results.items(): content = state content["user_id"] = user.to_string() if "last_active" in content: content["last_active_ago"] = int( self._clock.time_msec() - content.pop("last_active") ) results[user] = {"type": "m.presence", "content": content} defer.returnValue(results) @defer.inlineCallbacks @log_function def set_state(self, target_user, auth_user, state): # return # TODO (erikj): Turn this back on. Why did we end up sending EDUs # everywhere? 
if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this Home Server") if target_user != auth_user: raise AuthError(400, "Cannot set another user's presence") if "status_msg" not in state: state["status_msg"] = None for k in state.keys(): if k not in ("presence", "status_msg"): raise SynapseError( 400, "Unexpected presence state key '%s'" % (k,) ) if state["presence"] not in self.STATE_LEVELS: raise SynapseError(400, "'%s' is not a valid presence state" % ( state["presence"], )) logger.debug("Updating presence state of %s to %s", target_user.localpart, state["presence"]) state_to_store = dict(state) state_to_store["state"] = state_to_store.pop("presence") statuscache = self._get_or_offline_usercache(target_user) was_level = self.STATE_LEVELS[statuscache.get_state()["presence"]] now_level = self.STATE_LEVELS[state["presence"]] yield self.store.set_presence_state( target_user.localpart, state_to_store ) yield self.distributor.fire( "collect_presencelike_data", target_user, state ) if now_level > was_level: state["last_active"] = self.clock.time_msec() now_online = state["presence"] != PresenceState.OFFLINE was_polling = target_user in self._user_cachemap if now_online and not was_polling: self.start_polling_presence(target_user, state=state) elif not now_online and was_polling: self.stop_polling_presence(target_user) # TODO(paul): perform a presence push as part of start/stop poll so # we don't have to do this all the time self.changed_presencelike_data(target_user, state) def bump_presence_active_time(self, user, now=None): if now is None: now = self.clock.time_msec() prev_state = self._get_or_make_usercache(user) if now - prev_state.state.get("last_active", 0) < LAST_ACTIVE_GRANULARITY: return self.changed_presencelike_data(user, {"last_active": now}) def get_joined_rooms_for_user(self, user): """Get the list of rooms a user is joined to. Args: user(UserID): The user. Returns: A Deferred of a list of room id strings. """ rm_handler = self.homeserver.get_handlers().room_member_handler return rm_handler.get_joined_rooms_for_user(user) def get_joined_users_for_room_id(self, room_id): rm_handler = self.homeserver.get_handlers().room_member_handler return rm_handler.get_room_members(room_id) @defer.inlineCallbacks def changed_presencelike_data(self, user, state): """Updates the presence state of a local user. Args: user(UserID): The user being updated. state(dict): The new presence state for the user. Returns: A Deferred """ self._user_cachemap_latest_serial += 1 statuscache = yield self.update_presence_cache(user, state) yield self.push_presence(user, statuscache=statuscache) @log_function def started_user_eventstream(self, user): # TODO(paul): Use "last online" state self.set_state(user, user, {"presence": PresenceState.ONLINE}) @log_function def stopped_user_eventstream(self, user): # TODO(paul): Save current state as "last online" state self.set_state(user, user, {"presence": PresenceState.OFFLINE}) @defer.inlineCallbacks def user_joined_room(self, user, room_id): """Called via the distributor whenever a user joins a room. Notifies the new member of the presence of the current members. Notifies the current members of the room of the new member's presence. Args: user(UserID): The user who joined the room. room_id(str): The room id the user joined. 
""" if self.hs.is_mine(user): # No actual update but we need to bump the serial anyway for the # event source self._user_cachemap_latest_serial += 1 statuscache = yield self.update_presence_cache( user, room_ids=[room_id] ) self.push_update_to_local_and_remote( observed_user=user, room_ids=[room_id], statuscache=statuscache, ) # We also want to tell them about current presence of people. curr_users = yield self.get_joined_users_for_room_id(room_id) for local_user in [c for c in curr_users if self.hs.is_mine(c)]: statuscache = yield self.update_presence_cache( local_user, room_ids=[room_id], add_to_cache=False ) self.push_update_to_local_and_remote( observed_user=local_user, users_to_push=[user], statuscache=statuscache, ) @defer.inlineCallbacks def send_invite(self, observer_user, observed_user): """Request the presence of a local or remote user for a local user""" if not self.hs.is_mine(observer_user): raise SynapseError(400, "User is not hosted on this Home Server") yield self.store.add_presence_list_pending( observer_user.localpart, observed_user.to_string() ) if self.hs.is_mine(observed_user): yield self.invite_presence(observed_user, observer_user) else: yield self.federation.send_edu( destination=observed_user.domain, edu_type="m.presence_invite", content={ "observed_user": observed_user.to_string(), "observer_user": observer_user.to_string(), } ) @defer.inlineCallbacks def _should_accept_invite(self, observed_user, observer_user): if not self.hs.is_mine(observed_user): defer.returnValue(False) row = yield self.store.has_presence_state(observed_user.localpart) if not row: defer.returnValue(False) # TODO(paul): Eventually we'll ask the user's permission for this # before accepting. For now just accept any invite request defer.returnValue(True) @defer.inlineCallbacks def invite_presence(self, observed_user, observer_user): """Handles a m.presence_invite EDU. A remote or local user has requested presence updates for a local user. If the invite is accepted then allow the local or remote user to see the presence of the local user. Args: observed_user(UserID): The local user whose presence is requested. observer_user(UserID): The remote or local user requesting presence. """ accept = yield self._should_accept_invite(observed_user, observer_user) if accept: yield self.store.allow_presence_visible( observed_user.localpart, observer_user.to_string() ) if self.hs.is_mine(observer_user): if accept: yield self.accept_presence(observed_user, observer_user) else: yield self.deny_presence(observed_user, observer_user) else: edu_type = "m.presence_accept" if accept else "m.presence_deny" yield self.federation.send_edu( destination=observer_user.domain, edu_type=edu_type, content={ "observed_user": observed_user.to_string(), "observer_user": observer_user.to_string(), } ) @defer.inlineCallbacks def accept_presence(self, observed_user, observer_user): """Handles a m.presence_accept EDU. Mark a presence invite from a local or remote user as accepted in a local user's presence list. Starts polling for presence updates from the local or remote user. Args: observed_user(UserID): The user to update in the presence list. observer_user(UserID): The owner of the presence list to update. """ yield self.store.set_presence_list_accepted( observer_user.localpart, observed_user.to_string() ) self.start_polling_presence( observer_user, target_user=observed_user ) @defer.inlineCallbacks def deny_presence(self, observed_user, observer_user): """Handle a m.presence_deny EDU. 
Removes a local or remote user from a local user's presence list. Args: observed_user(UserID): The local or remote user to remove from the list. observer_user(UserID): The local owner of the presence list. Returns: A Deferred. """ yield self.store.del_presence_list( observer_user.localpart, observed_user.to_string() ) # TODO(paul): Inform the user somehow? @defer.inlineCallbacks def drop(self, observed_user, observer_user): """Remove a local or remote user from a local user's presence list and unsubscribe the local user from updates that user. Args: observed_user(UserId): The local or remote user to remove from the list. observer_user(UserId): The local owner of the presence list. Returns: A Deferred. """ if not self.hs.is_mine(observer_user): raise SynapseError(400, "User is not hosted on this Home Server") yield self.store.del_presence_list( observer_user.localpart, observed_user.to_string() ) self.stop_polling_presence( observer_user, target_user=observed_user ) @defer.inlineCallbacks def get_presence_list(self, observer_user, accepted=None): """Get the presence list for a local user. The retured list includes the current presence state for each user listed. Args: observer_user(UserID): The local user whose presence list to fetch. accepted(bool or None): If not none then only include users who have or have not accepted the presence invite request. Returns: A Deferred list of presence state events. """ if not self.hs.is_mine(observer_user): raise SynapseError(400, "User is not hosted on this Home Server") presence_list = yield self.store.get_presence_list( observer_user.localpart, accepted=accepted ) results = [] for row in presence_list: observed_user = UserID.from_string(row["observed_user_id"]) result = { "observed_user": observed_user, "accepted": row["accepted"] } result.update( self._get_or_offline_usercache(observed_user).get_state() ) if "last_active" in result: result["last_active_ago"] = int( self.clock.time_msec() - result.pop("last_active") ) results.append(result) defer.returnValue(results) @defer.inlineCallbacks @log_function def start_polling_presence(self, user, target_user=None, state=None): """Subscribe a local user to presence updates from a local or remote user. If no target_user is supplied then subscribe to all users stored in the presence list for the local user. Additonally this pushes the current presence state of this user to all target_users. That state can be provided directly or will be read from the stored state for the local user. Also this attempts to notify the local user of the current state of any local target users. Args: user(UserID): The local user that whishes for presence updates. target_user(UserID): The local or remote user whose updates are wanted. state(dict): Optional presence state for the local user. 
""" logger.debug("Start polling for presence from %s", user) if target_user: target_users = set([target_user]) room_ids = [] else: presence = yield self.store.get_presence_list( user.localpart, accepted=True ) target_users = set([ UserID.from_string(x["observed_user_id"]) for x in presence ]) # Also include people in all my rooms room_ids = yield self.get_joined_rooms_for_user(user) if state is None: state = yield self.store.get_presence_state(user.localpart) else: # statuscache = self._get_or_make_usercache(user) # self._user_cachemap_latest_serial += 1 # statuscache.update(state, self._user_cachemap_latest_serial) pass yield self.push_update_to_local_and_remote( observed_user=user, users_to_push=target_users, room_ids=room_ids, statuscache=self._get_or_make_usercache(user), ) for target_user in target_users: if self.hs.is_mine(target_user): self._start_polling_local(user, target_user) # We want to tell the person that just came online # presence state of people they are interested in? self.push_update_to_clients( users_to_push=[user], ) deferreds = [] remote_users = [u for u in target_users if not self.hs.is_mine(u)] remoteusers_by_domain = partition(remote_users, lambda u: u.domain) # Only poll for people in our get_presence_list for domain in remoteusers_by_domain: remoteusers = remoteusers_by_domain[domain] deferreds.append(self._start_polling_remote( user, domain, remoteusers )) yield defer.DeferredList(deferreds, consumeErrors=True) def _start_polling_local(self, user, target_user): """Subscribe a local user to presence updates for a local user Args: user(UserId): The local user that wishes for updates. target_user(UserId): The local users whose updates are wanted. """ target_localpart = target_user.localpart if target_localpart not in self._local_pushmap: self._local_pushmap[target_localpart] = set() self._local_pushmap[target_localpart].add(user) def _start_polling_remote(self, user, domain, remoteusers): """Subscribe a local user to presence updates for remote users on a given remote domain. Args: user(UserID): The local user that wishes for updates. domain(str): The remote server the local user wants updates from. remoteusers(UserID): The remote users that local user wants to be told about. Returns: A Deferred. """ to_poll = set() for u in remoteusers: if u not in self._remote_recvmap: self._remote_recvmap[u] = set() to_poll.add(u) self._remote_recvmap[u].add(user) if not to_poll: return defer.succeed(None) return self.federation.send_edu( destination=domain, edu_type="m.presence", content={"poll": [u.to_string() for u in to_poll]} ) @log_function def stop_polling_presence(self, user, target_user=None): """Unsubscribe a local user from presence updates from a local or remote user. If no target user is supplied then unsubscribe the user from all presence updates that the user had subscribed to. Args: user(UserID): The local user that no longer wishes for updates. target_user(UserID or None): The user whose updates are no longer wanted. Returns: A Deferred. 
""" logger.debug("Stop polling for presence from %s", user) if not target_user or self.hs.is_mine(target_user): self._stop_polling_local(user, target_user=target_user) deferreds = [] if target_user: if target_user not in self._remote_recvmap: return target_users = set([target_user]) else: target_users = self._remote_recvmap.keys() remoteusers = [u for u in target_users if user in self._remote_recvmap[u]] remoteusers_by_domain = partition(remoteusers, lambda u: u.domain) for domain in remoteusers_by_domain: remoteusers = remoteusers_by_domain[domain] deferreds.append( self._stop_polling_remote(user, domain, remoteusers) ) return defer.DeferredList(deferreds, consumeErrors=True) def _stop_polling_local(self, user, target_user): """Unsubscribe a local user from presence updates from a local user on this server. Args: user(UserID): The local user that no longer wishes for updates. target_user(UserID): The user whose updates are no longer wanted. """ for localpart in self._local_pushmap.keys(): if target_user and localpart != target_user.localpart: continue if user in self._local_pushmap[localpart]: self._local_pushmap[localpart].remove(user) if not self._local_pushmap[localpart]: del self._local_pushmap[localpart] @log_function def _stop_polling_remote(self, user, domain, remoteusers): """Unsubscribe a local user from presence updates from remote users on a given domain. Args: user(UserID): The local user that no longer wishes for updates. domain(str): The remote server to unsubscribe from. remoteusers([UserID]): The users on that remote server that the local user no longer wishes to be updated about. Returns: A Deferred. """ to_unpoll = set() for u in remoteusers: self._remote_recvmap[u].remove(user) if not self._remote_recvmap[u]: del self._remote_recvmap[u] to_unpoll.add(u) if not to_unpoll: return defer.succeed(None) return self.federation.send_edu( destination=domain, edu_type="m.presence", content={"unpoll": [u.to_string() for u in to_unpoll]} ) @defer.inlineCallbacks @log_function def push_presence(self, user, statuscache): """ Notify local and remote users of a change in presence of a local user. Pushes the update to local clients and remote domains that are directly subscribed to the presence of the local user. Also pushes that update to any local user or remote domain that shares a room with the local user. Args: user(UserID): The local user whose presence was updated. statuscache(UserPresenceCache): Cache of the user's presence state Returns: A Deferred. """ assert(self.hs.is_mine(user)) logger.debug("Pushing presence update from %s", user) localusers = set(self._local_pushmap.get(user.localpart, set())) remotedomains = set(self._remote_sendmap.get(user.localpart, set())) # Reflect users' status changes back to themselves, so UIs look nice # and also user is informed of server-forced pushes localusers.add(user) room_ids = yield self.get_joined_rooms_for_user(user) if not localusers and not room_ids: defer.returnValue(None) yield self.push_update_to_local_and_remote( observed_user=user, users_to_push=localusers, remote_domains=remotedomains, room_ids=room_ids, statuscache=statuscache, ) yield self.distributor.fire("user_presence_changed", user, statuscache) @defer.inlineCallbacks def incoming_presence(self, origin, content): """Handle an incoming m.presence EDU. For each presence update in the "push" list update our local cache and notify the appropriate local clients. 
Only clients that share a room or are directly subscribed to the presence for a user should be notified of the update. For each subscription request in the "poll" list start pushing presence updates to the remote server. For unsubscribe request in the "unpoll" list stop pushing presence updates to the remote server. Args: orgin(str): The source of this m.presence EDU. content(dict): The content of this m.presence EDU. Returns: A Deferred. """ deferreds = [] for push in content.get("push", []): user = UserID.from_string(push["user_id"]) logger.debug("Incoming presence update from %s", user) observers = set(self._remote_recvmap.get(user, set())) if observers: logger.debug( " | %d interested local observers %r", len(observers), observers ) room_ids = yield self.get_joined_rooms_for_user(user) if room_ids: logger.debug(" | %d interested room IDs %r", len(room_ids), room_ids) state = dict(push) del state["user_id"] if "presence" not in state: logger.warning( "Received a presence 'push' EDU from %s without a" " 'presence' key", origin ) continue if "last_active_ago" in state: state["last_active"] = int( self.clock.time_msec() - state.pop("last_active_ago") ) self._user_cachemap_latest_serial += 1 yield self.update_presence_cache(user, state, room_ids=room_ids) if not observers and not room_ids: logger.debug(" | no interested observers or room IDs") continue self.push_update_to_clients( users_to_push=observers, room_ids=room_ids ) user_id = user.to_string() if state["presence"] == PresenceState.OFFLINE: self._remote_offline_serials.insert( 0, (self._user_cachemap_latest_serial, set([user_id])) ) while len(self._remote_offline_serials) > MAX_OFFLINE_SERIALS: self._remote_offline_serials.pop() # remove the oldest del self._user_cachemap[user] else: # Remove the user from remote_offline_serials now that they're # no longer offline for idx, elem in enumerate(self._remote_offline_serials): (_, user_ids) = elem user_ids.discard(user_id) if not user_ids: self._remote_offline_serials.pop(idx) for poll in content.get("poll", []): user = UserID.from_string(poll) if not self.hs.is_mine(user): continue # TODO(paul) permissions checks if user not in self._remote_sendmap: self._remote_sendmap[user] = set() self._remote_sendmap[user].add(origin) deferreds.append(self._push_presence_remote(user, origin)) for unpoll in content.get("unpoll", []): user = UserID.from_string(unpoll) if not self.hs.is_mine(user): continue if user in self._remote_sendmap: self._remote_sendmap[user].remove(origin) if not self._remote_sendmap[user]: del self._remote_sendmap[user] yield defer.DeferredList(deferreds, consumeErrors=True) @defer.inlineCallbacks def update_presence_cache(self, user, state={}, room_ids=None, add_to_cache=True): """Update the presence cache for a user with a new state and bump the serial to the latest value. Args: user(UserID): The user being updated state(dict): The presence state being updated room_ids(None or list of str): A list of room_ids to update. If room_ids is None then fetch the list of room_ids the user is joined to. add_to_cache: Whether to add an entry to the presence cache if the user isn't already in the cache. Returns: A Deferred UserPresenceCache for the user being updated. 
""" if room_ids is None: room_ids = yield self.get_joined_rooms_for_user(user) for room_id in room_ids: self._room_serials[room_id] = self._user_cachemap_latest_serial if add_to_cache: statuscache = self._get_or_make_usercache(user) else: statuscache = self._get_or_offline_usercache(user) statuscache.update(state, serial=self._user_cachemap_latest_serial) defer.returnValue(statuscache) @defer.inlineCallbacks def push_update_to_local_and_remote(self, observed_user, statuscache, users_to_push=[], room_ids=[], remote_domains=[]): """Notify local clients and remote servers of a change in the presence of a user. Args: observed_user(UserID): The user to push the presence state for. statuscache(UserPresenceCache): The cache for the presence state to push. users_to_push([UserID]): A list of local and remote users to notify. room_ids([str]): Notify the local and remote occupants of these rooms. remote_domains([str]): A list of remote servers to notify in addition to those implied by the users_to_push and the room_ids. Returns: A Deferred. """ localusers, remoteusers = partitionbool( users_to_push, lambda u: self.hs.is_mine(u) ) localusers = set(localusers) self.push_update_to_clients( users_to_push=localusers, room_ids=room_ids ) remote_domains = set(remote_domains) remote_domains |= set([r.domain for r in remoteusers]) for room_id in room_ids: remote_domains.update( (yield self.store.get_joined_hosts_for_room(room_id)) ) remote_domains.discard(self.hs.hostname) deferreds = [] for domain in remote_domains: logger.debug(" | push to remote domain %s", domain) deferreds.append( self._push_presence_remote( observed_user, domain, state=statuscache.get_state() ) ) yield defer.DeferredList(deferreds, consumeErrors=True) defer.returnValue((localusers, remote_domains)) def push_update_to_clients(self, users_to_push=[], room_ids=[]): """Notify clients of a new presence event. Args: users_to_push([UserID]): List of users to notify. room_ids([str]): List of room_ids to notify. """ with PreserveLoggingContext(): self.notifier.on_new_event( "presence_key", self._user_cachemap_latest_serial, users_to_push, room_ids, ) @defer.inlineCallbacks def _push_presence_remote(self, user, destination, state=None): """Push a user's presence to a remote server. If a presence state event that event is sent. Otherwise a new state event is constructed from the stored presence state. The last_active is replaced with last_active_ago in case the wallclock time on the remote server is different to the time on this server. Sends an EDU to the remote server with the current presence state. Args: user(UserID): The user to push the presence state for. destination(str): The remote server to send state to. state(dict): The state to push, or None to use the current stored state. Returns: A Deferred. 
""" if state is None: state = yield self.store.get_presence_state(user.localpart) del state["mtime"] state["presence"] = state.pop("state") if user in self._user_cachemap: state["last_active"] = ( self._user_cachemap[user].get_state()["last_active"] ) yield self.distributor.fire( "collect_presencelike_data", user, state ) if "last_active" in state: state = dict(state) state["last_active_ago"] = int( self.clock.time_msec() - state.pop("last_active") ) user_state = {"user_id": user.to_string(), } user_state.update(state) yield self.federation.send_edu( destination=destination, edu_type="m.presence", content={"push": [user_state, ], } ) class PresenceEventSource(object): def __init__(self, hs): self.hs = hs self.clock = hs.get_clock() @defer.inlineCallbacks @log_function def get_new_events_for_user(self, user, from_key, limit): from_key = int(from_key) presence = self.hs.get_handlers().presence_handler cachemap = presence._user_cachemap max_serial = presence._user_cachemap_latest_serial clock = self.clock latest_serial = 0 user_ids_to_check = {user} presence_list = yield presence.store.get_presence_list( user.localpart, accepted=True ) if presence_list is not None: user_ids_to_check |= set( UserID.from_string(p["observed_user_id"]) for p in presence_list ) room_ids = yield presence.get_joined_rooms_for_user(user) for room_id in set(room_ids) & set(presence._room_serials): if presence._room_serials[room_id] > from_key: joined = yield presence.get_joined_users_for_room_id(room_id) user_ids_to_check |= set(joined) updates = [] for observed_user in user_ids_to_check & set(cachemap): cached = cachemap[observed_user] if cached.serial <= from_key or cached.serial > max_serial: continue latest_serial = max(cached.serial, latest_serial) updates.append(cached.make_event(user=observed_user, clock=clock)) # TODO(paul): limit for serial, user_ids in presence._remote_offline_serials: if serial <= from_key: break if serial > max_serial: continue latest_serial = max(latest_serial, serial) for u in user_ids: updates.append({ "type": "m.presence", "content": {"user_id": u, "presence": PresenceState.OFFLINE}, }) # TODO(paul): For the v2 API we want to tell the client their from_key # is too old if we fell off the end of the _remote_offline_serials # list, and get them to invalidate+resync. In v1 we have no such # concept so this is a best-effort result. if updates: defer.returnValue((updates, latest_serial)) else: defer.returnValue(([], presence._user_cachemap_latest_serial)) def get_current_key(self): presence = self.hs.get_handlers().presence_handler return presence._user_cachemap_latest_serial @defer.inlineCallbacks def get_pagination_rows(self, user, pagination_config, key): # TODO (erikj): Does this make sense? Ordering? 
from_key = int(pagination_config.from_key) if pagination_config.to_key: to_key = int(pagination_config.to_key) else: to_key = -1 presence = self.hs.get_handlers().presence_handler cachemap = presence._user_cachemap user_ids_to_check = {user} presence_list = yield presence.store.get_presence_list( user.localpart, accepted=True ) if presence_list is not None: user_ids_to_check |= set( UserID.from_string(p["observed_user_id"]) for p in presence_list ) room_ids = yield presence.get_joined_rooms_for_user(user) for room_id in set(room_ids) & set(presence._room_serials): if presence._room_serials[room_id] >= from_key: joined = yield presence.get_joined_users_for_room_id(room_id) user_ids_to_check |= set(joined) updates = [] for observed_user in user_ids_to_check & set(cachemap): if not (to_key < cachemap[observed_user].serial <= from_key): continue updates.append((observed_user, cachemap[observed_user])) # TODO(paul): limit if updates: clock = self.clock earliest_serial = max([x[1].serial for x in updates]) data = [x[1].make_event(user=x[0], clock=clock) for x in updates] defer.returnValue((data, earliest_serial)) else: defer.returnValue(([], 0)) class UserPresenceCache(object): """Store an observed user's state and status message. Includes the update timestamp. """ def __init__(self): self.state = {"presence": PresenceState.OFFLINE} self.serial = None def update(self, state, serial): assert("mtime_age" not in state) self.state.update(state) # Delete keys that are now 'None' for k in self.state.keys(): if self.state[k] is None: del self.state[k] self.serial = serial if "status_msg" in state: self.status_msg = state["status_msg"] else: self.status_msg = None def get_state(self): # clone it so caller can't break our cache state = dict(self.state) return state def make_event(self, user, clock): content = self.get_state() content["user_id"] = user.to_string() if "last_active" in content: content["last_active_ago"] = int( clock.time_msec() - content.pop("last_active") ) return {"type": "m.presence", "content": content}
apache-2.0
2,773,154,445,091,974,700
35.030864
85
0.580992
false
mushtaqak/edx-platform
lms/djangoapps/commerce/tests/test_views.py
1
17521
""" Tests for commerce views. """ import json from uuid import uuid4 from nose.plugins.attrib import attr import ddt from django.conf import settings from django.core.urlresolvers import reverse from django.test import TestCase from django.test.utils import override_settings import mock from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from ecommerce_api_client import exceptions from commerce.constants import Messages from commerce.tests import TEST_BASKET_ID, TEST_ORDER_NUMBER, TEST_PAYMENT_DATA, TEST_API_URL, TEST_API_SIGNING_KEY from commerce.tests.mocks import mock_basket_order, mock_create_basket from course_modes.models import CourseMode from embargo.test_utils import restrict_course from openedx.core.lib.django_test_client_utils import get_absolute_url from enrollment.api import get_enrollment from student.models import CourseEnrollment from student.tests.factories import UserFactory, CourseModeFactory from student.tests.tests import EnrollmentEventTestMixin class UserMixin(object): """ Mixin for tests involving users. """ def setUp(self): super(UserMixin, self).setUp() self.user = UserFactory() def _login(self): """ Log into LMS. """ self.client.login(username=self.user.username, password='test') @attr('shard_1') @ddt.ddt @override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY) class BasketsViewTests(EnrollmentEventTestMixin, UserMixin, ModuleStoreTestCase): """ Tests for the commerce orders view. """ def _post_to_view(self, course_id=None): """ POST to the view being tested. Arguments course_id (str) -- ID of course for which a seat should be ordered. :return: Response """ course_id = unicode(course_id or self.course.id) return self.client.post(self.url, {'course_id': course_id}) def assertResponseMessage(self, response, expected_msg): """ Asserts the detail field in the response's JSON body equals the expected message. """ actual = json.loads(response.content)['detail'] self.assertEqual(actual, expected_msg) def assertResponsePaymentData(self, response): """ Asserts correctness of a JSON body containing payment information. """ actual_response = json.loads(response.content) self.assertEqual(actual_response, TEST_PAYMENT_DATA) def assertValidEcommerceInternalRequestErrorResponse(self, response): """ Asserts the response is a valid response sent when the E-Commerce API is unavailable. """ self.assertEqual(response.status_code, 500) actual = json.loads(response.content)['detail'] self.assertIn('Call to E-Commerce API failed', actual) def assertUserNotEnrolled(self): """ Asserts that the user is NOT enrolled in the course, and that an enrollment event was NOT fired. """ self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course.id)) self.assert_no_events_were_emitted() def setUp(self): super(BasketsViewTests, self).setUp() self.url = reverse('commerce:baskets') self._login() self.course = CourseFactory.create() # TODO Verify this is the best method to create CourseMode objects. # TODO Find/create constants for the modes. for mode in [CourseMode.HONOR, CourseMode.VERIFIED, CourseMode.AUDIT]: CourseModeFactory.create( course_id=self.course.id, mode_slug=mode, mode_display_name=mode, sku=uuid4().hex.decode('ascii') ) # Ignore events fired from UserFactory creation self.reset_tracker() @mock.patch.dict(settings.FEATURES, {'EMBARGO': True}) def test_embargo_restriction(self): """ The view should return HTTP 403 status if the course is embargoed. 
""" with restrict_course(self.course.id) as redirect_url: response = self._post_to_view() self.assertEqual(403, response.status_code) body = json.loads(response.content) self.assertEqual(get_absolute_url(redirect_url), body['user_message_url']) def test_login_required(self): """ The view should return HTTP 403 status if the user is not logged in. """ self.client.logout() self.assertEqual(403, self._post_to_view().status_code) @ddt.data('delete', 'get', 'put') def test_post_required(self, method): """ Verify that the view only responds to POST operations. """ response = getattr(self.client, method)(self.url) self.assertEqual(405, response.status_code) def test_invalid_course(self): """ If the course does not exist, the view should return HTTP 406. """ # TODO Test inactive courses, and those not open for enrollment. self.assertEqual(406, self._post_to_view('aaa/bbb/ccc').status_code) def test_invalid_request_data(self): """ If invalid data is supplied with the request, the view should return HTTP 406. """ self.assertEqual(406, self.client.post(self.url, {}).status_code) self.assertEqual(406, self.client.post(self.url, {'not_course_id': ''}).status_code) def test_ecommerce_api_timeout(self): """ If the call to the E-Commerce API times out, the view should log an error and return an HTTP 503 status. """ with mock_create_basket(exception=exceptions.Timeout): response = self._post_to_view() self.assertValidEcommerceInternalRequestErrorResponse(response) self.assertUserNotEnrolled() def test_ecommerce_api_error(self): """ If the E-Commerce API raises an error, the view should return an HTTP 503 status. """ with mock_create_basket(exception=exceptions.SlumberBaseException): response = self._post_to_view() self.assertValidEcommerceInternalRequestErrorResponse(response) self.assertUserNotEnrolled() def _test_successful_ecommerce_api_call(self, is_completed=True): """ Verifies that the view contacts the E-Commerce API with the correct data and headers. """ response = self._post_to_view() # Validate the response content if is_completed: msg = Messages.ORDER_COMPLETED.format(order_number=TEST_ORDER_NUMBER) self.assertResponseMessage(response, msg) else: self.assertResponsePaymentData(response) @ddt.data(True, False) def test_course_with_honor_seat_sku(self, user_is_active): """ If the course has a SKU, the view should get authorization from the E-Commerce API before enrolling the user in the course. If authorization is approved, the user should be redirected to the user dashboard. """ # Set user's active flag self.user.is_active = user_is_active self.user.save() # pylint: disable=no-member return_value = {'id': TEST_BASKET_ID, 'payment_data': None, 'order': {'number': TEST_ORDER_NUMBER}} with mock_create_basket(response=return_value): self._test_successful_ecommerce_api_call() @ddt.data(True, False) def test_course_with_paid_seat_sku(self, user_is_active): """ If the course has a SKU, the view should return data that the client will use to redirect the user to an external payment processor. """ # Set user's active flag self.user.is_active = user_is_active self.user.save() # pylint: disable=no-member return_value = {'id': TEST_BASKET_ID, 'payment_data': TEST_PAYMENT_DATA, 'order': None} with mock_create_basket(response=return_value): self._test_successful_ecommerce_api_call(False) def _test_course_without_sku(self): """ Validates the view bypasses the E-Commerce API when the course has no CourseModes with SKUs. 
""" # Place an order with mock_create_basket(expect_called=False): response = self._post_to_view() # Validate the response content self.assertEqual(response.status_code, 200) msg = Messages.NO_SKU_ENROLLED.format(enrollment_mode='honor', course_id=self.course.id, username=self.user.username) self.assertResponseMessage(response, msg) def test_course_without_sku(self): """ If the course does NOT have a SKU, the user should be enrolled in the course (under the honor mode) and redirected to the user dashboard. """ # Remove SKU from all course modes for course_mode in CourseMode.objects.filter(course_id=self.course.id): course_mode.sku = None course_mode.save() self._test_course_without_sku() @override_settings(ECOMMERCE_API_URL=None, ECOMMERCE_API_SIGNING_KEY=None) def test_ecommerce_service_not_configured(self): """ If the E-Commerce Service is not configured, the view should enroll the user. """ with mock_create_basket(expect_called=False): response = self._post_to_view() # Validate the response self.assertEqual(response.status_code, 200) msg = Messages.NO_ECOM_API.format(username=self.user.username, course_id=self.course.id) self.assertResponseMessage(response, msg) # Ensure that the user is not enrolled and that no calls were made to the E-Commerce API self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course.id)) def assertProfessionalModeBypassed(self): """ Verifies that the view returns HTTP 406 when a course with no honor mode is encountered. """ CourseMode.objects.filter(course_id=self.course.id).delete() mode = CourseMode.NO_ID_PROFESSIONAL_MODE CourseModeFactory.create(course_id=self.course.id, mode_slug=mode, mode_display_name=mode, sku=uuid4().hex.decode('ascii')) with mock_create_basket(expect_called=False): response = self._post_to_view() # The view should return an error status code self.assertEqual(response.status_code, 406) msg = Messages.NO_HONOR_MODE.format(course_id=self.course.id) self.assertResponseMessage(response, msg) def test_course_with_professional_mode_only(self): """ Verifies that the view behaves appropriately when the course only has a professional mode. """ self.assertProfessionalModeBypassed() @override_settings(ECOMMERCE_API_URL=None, ECOMMERCE_API_SIGNING_KEY=None) def test_professional_mode_only_and_ecommerce_service_not_configured(self): """ Verifies that the view behaves appropriately when the course only has a professional mode and the E-Commerce Service is not configured. """ self.assertProfessionalModeBypassed() def test_empty_sku(self): """ If the CourseMode has an empty string for a SKU, the API should not be used. """ # Set SKU to empty string for all modes. for course_mode in CourseMode.objects.filter(course_id=self.course.id): course_mode.sku = '' course_mode.save() self._test_course_without_sku() def test_existing_active_enrollment(self): """ The view should respond with HTTP 409 if the user has an existing active enrollment for the course. """ # Enroll user in the course CourseEnrollment.enroll(self.user, self.course.id) self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course.id)) response = self._post_to_view() self.assertEqual(response.status_code, 409) msg = Messages.ENROLLMENT_EXISTS.format(username=self.user.username, course_id=self.course.id) self.assertResponseMessage(response, msg) def test_existing_inactive_enrollment(self): """ If the user has an inactive enrollment for the course, the view should behave as if the user has no enrollment. 
""" # Create an inactive enrollment CourseEnrollment.enroll(self.user, self.course.id) CourseEnrollment.unenroll(self.user, self.course.id, True) self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course.id)) self.assertIsNotNone(get_enrollment(self.user.username, unicode(self.course.id))) with mock_create_basket(): self._test_successful_ecommerce_api_call(False) class OrdersViewTests(BasketsViewTests): """ Ensures that /orders/ points to and behaves like /baskets/, for backward compatibility with stale js clients during updates. (XCOM-214) remove after release. """ def setUp(self): super(OrdersViewTests, self).setUp() self.url = reverse('commerce:orders') @attr('shard_1') @override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY) class BasketOrderViewTests(UserMixin, TestCase): """ Tests for the basket order view. """ view_name = 'commerce:basket_order' MOCK_ORDER = {'number': 1} path = reverse(view_name, kwargs={'basket_id': 1}) def setUp(self): super(BasketOrderViewTests, self).setUp() self._login() def test_order_found(self): """ If the order is located, the view should pass the data from the API. """ with mock_basket_order(basket_id=1, response=self.MOCK_ORDER): response = self.client.get(self.path) self.assertEqual(response.status_code, 200) actual = json.loads(response.content) self.assertEqual(actual, self.MOCK_ORDER) def test_order_not_found(self): """ If the order is not found, the view should return a 404. """ with mock_basket_order(basket_id=1, exception=exceptions.HttpNotFoundError): response = self.client.get(self.path) self.assertEqual(response.status_code, 404) def test_login_required(self): """ The view should return 403 if the user is not logged in. """ self.client.logout() response = self.client.get(self.path) self.assertEqual(response.status_code, 403) @attr('shard_1') @ddt.ddt class ReceiptViewTests(UserMixin, TestCase): """ Tests for the receipt view. """ def test_login_required(self): """ The view should redirect to the login page if the user is not logged in. """ self.client.logout() response = self.client.post(reverse('commerce:checkout_receipt')) self.assertEqual(response.status_code, 302) def post_to_receipt_page(self, post_data): """ DRY helper """ response = self.client.post(reverse('commerce:checkout_receipt'), params={'basket_id': 1}, data=post_data) self.assertEqual(response.status_code, 200) return response @ddt.data('decision', 'reason_code', 'signed_field_names', None) def test_is_cybersource(self, post_key): """ Ensure the view uses three specific POST keys to detect a request initiated by Cybersource. """ self._login() post_data = {'decision': 'REJECT', 'reason_code': '200', 'signed_field_names': 'dummy'} if post_key is not None: # a key will be missing; we will not expect the receipt page to handle a cybersource decision del post_data[post_key] expected_pattern = r"<title>(\s+)Receipt" else: expected_pattern = r"<title>(\s+)Payment Failed" response = self.post_to_receipt_page(post_data) self.assertRegexpMatches(response.content, expected_pattern) @ddt.data('ACCEPT', 'REJECT', 'ERROR') def test_cybersource_decision(self, decision): """ Ensure the view renders a page appropriately depending on the Cybersource decision. 
""" self._login() post_data = {'decision': decision, 'reason_code': '200', 'signed_field_names': 'dummy'} expected_pattern = r"<title>(\s+)Receipt" if decision == 'ACCEPT' else r"<title>(\s+)Payment Failed" response = self.post_to_receipt_page(post_data) self.assertRegexpMatches(response.content, expected_pattern) @ddt.data(True, False) @mock.patch('commerce.views.is_user_payment_error') def test_cybersource_message(self, is_user_message_expected, mock_is_user_payment_error): """ Ensure that the page displays the right message for the reason_code (it may be a user error message or a system error message). """ mock_is_user_payment_error.return_value = is_user_message_expected self._login() response = self.post_to_receipt_page({'decision': 'REJECT', 'reason_code': '99', 'signed_field_names': 'dummy'}) self.assertTrue(mock_is_user_payment_error.called) self.assertTrue(mock_is_user_payment_error.call_args[0][0], '99') user_message = "There was a problem with this transaction" system_message = "A system error occurred while processing your payment" self.assertRegexpMatches(response.content, user_message if is_user_message_expected else system_message) self.assertNotRegexpMatches(response.content, user_message if not is_user_message_expected else system_message)
agpl-3.0
3,068,241,254,103,954,400
41.016787
120
0.662919
false
romeotestuser/glimsol_report
report/billing_statement.py
1
2348
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp import netsvc from openerp.netsvc import Service for x in ['report.glimsol.billing.statement']: try: del Service._services[x] except: pass from openerp.report import report_sxw class billing(report_sxw.rml_parse): def __init__(self, cr, uid, name, context=None): super(billing, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'time': time, 'get_line':self._get_line, 'get_total_si_amount':self._get_total_si_amount, 'get_total_ticket_amount':self._get_total_ticket_amount, 'get_user_ref':self._get_user_ref, }) def _get_line(self,obj): res=[] return res def _get_total_si_amount(self,obj): res=[] return res def _get_total_ticket_amount(self,obj): res=[] return res def _get_user_ref(self,obj,trigger): for target_trigger in ['sales executive','courier','customer']: if target_trigger != trigger: continue res = [] return res report_sxw.report_sxw('report.glimsol.billing.statement', 'account.billing', 'addons/glimsol_report/report/billing_statement.rml', parser=billing, header="external")
gpl-2.0
385,092,275,381,778,500
32.557143
165
0.57879
false
b-carter/numpy
numpy/core/tests/test_multiarray.py
1
260744
from __future__ import division, absolute_import, print_function import collections import tempfile import sys import shutil import warnings import operator import io import itertools import functools import ctypes import os import gc from contextlib import contextmanager if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins from decimal import Decimal import numpy as np from numpy.compat import strchar, unicode from .test_print import in_foreign_locale from numpy.core.multiarray_tests import ( test_neighborhood_iterator, test_neighborhood_iterator_oob, test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, test_inplace_increment, get_buffer_info, test_as_c_array, ) from numpy.testing import ( run_module_suite, assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings ) # Need to test an object that does not fully implement math interface from datetime import timedelta, datetime if sys.version_info[:2] > (3, 2): # In Python 3.3 the representation of empty shape, strides and sub-offsets # is an empty tuple instead of None. # http://docs.python.org/dev/whatsnew/3.3.html#api-changes EMPTY = () else: EMPTY = None def _aligned_zeros(shape, dtype=float, order="C", align=None): """Allocate a new ndarray with aligned memory.""" dtype = np.dtype(dtype) if dtype == np.dtype(object): # Can't do this, fall back to standard allocation (which # should always be sufficiently aligned) if align is not None: raise ValueError("object array alignment not supported") return np.zeros(shape, dtype=dtype, order=order) if align is None: align = dtype.alignment if not hasattr(shape, '__len__'): shape = (shape,) size = functools.reduce(operator.mul, shape) * dtype.itemsize buf = np.empty(size + align + 1, np.uint8) offset = buf.__array_interface__['data'][0] % align if offset != 0: offset = align - offset # Note: slices producing 0-size arrays do not necessarily change # data pointer --- so we use and allocate size+1 buf = buf[offset:offset+size+1][:-1] data = np.ndarray(shape, dtype, buf, order=order) data.fill(0) return data class TestFlags(object): def setup(self): self.a = np.arange(10) def test_writeable(self): mydict = locals() self.a.flags.writeable = False assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) self.a.flags.writeable = True self.a[0] = 5 self.a[0] = 0 def test_otherflags(self): assert_equal(self.a.flags.carray, True) assert_equal(self.a.flags.farray, False) assert_equal(self.a.flags.behaved, True) assert_equal(self.a.flags.fnc, False) assert_equal(self.a.flags.forc, True) assert_equal(self.a.flags.owndata, True) assert_equal(self.a.flags.writeable, True) assert_equal(self.a.flags.aligned, True) assert_equal(self.a.flags.updateifcopy, False) def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) assert_(a.flags.aligned) # not power of two are accessed byte-wise and thus considered aligned a = np.zeros(5, dtype=np.dtype('|S4')) assert_(a.flags.aligned) def test_void_align(self): a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) assert_(a.flags.aligned) class TestHash(object): # see #3793 def test_int(self): for st, ut, s in [(np.int8, np.uint8, 8), (np.int16, np.uint16, 16), (np.int32, np.uint32, 32), (np.int64, np.uint64, 64)]: for i in range(1, s): 
assert_equal(hash(st(-2**i)), hash(-2**i), err_msg="%r: -2**%d" % (st, i)) assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (st, i - 1)) assert_equal(hash(st(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (st, i)) i = max(i - 1, 1) assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (ut, i - 1)) assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (ut, i)) class TestAttributes(object): def setup(self): self.one = np.arange(10) self.two = np.arange(20).reshape(4, 5) self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) def test_attributes(self): assert_equal(self.one.shape, (10,)) assert_equal(self.two.shape, (4, 5)) assert_equal(self.three.shape, (2, 5, 6)) self.three.shape = (10, 3, 2) assert_equal(self.three.shape, (10, 3, 2)) self.three.shape = (2, 5, 6) assert_equal(self.one.strides, (self.one.itemsize,)) num = self.two.itemsize assert_equal(self.two.strides, (5*num, num)) num = self.three.itemsize assert_equal(self.three.strides, (30*num, 6*num, num)) assert_equal(self.one.ndim, 1) assert_equal(self.two.ndim, 2) assert_equal(self.three.ndim, 3) num = self.two.itemsize assert_equal(self.two.size, 20) assert_equal(self.two.nbytes, 20*num) assert_equal(self.two.itemsize, self.two.dtype.itemsize) assert_equal(self.two.base, np.arange(20)) def test_dtypeattr(self): assert_equal(self.one.dtype, np.dtype(np.int_)) assert_equal(self.three.dtype, np.dtype(np.float_)) assert_equal(self.one.dtype.char, 'l') assert_equal(self.three.dtype.char, 'd') assert_(self.three.dtype.str[0] in '<>') assert_equal(self.one.dtype.str[1], 'i') assert_equal(self.three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 numpy_int = np.int_(0) if sys.version_info[0] >= 3: # On Py3k int_ should not inherit from int, because it's not # fixed-width anymore assert_equal(isinstance(numpy_int, int), False) else: # Otherwise, it should inherit from int... assert_equal(isinstance(numpy_int, int), True) # ... and fast-path checks on C-API level should also work from numpy.core.multiarray_tests import test_int_subclass assert_equal(test_int_subclass(numpy_int), True) def test_stridesattr(self): x = self.one def make_array(size, offset, strides): return np.ndarray(size, buffer=x, dtype=int, offset=offset*x.itemsize, strides=strides*x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_raises(ValueError, make_array, 4, 4, -2) assert_raises(ValueError, make_array, 4, 2, -1) assert_raises(ValueError, make_array, 8, 3, 1) assert_equal(make_array(8, 3, 0), np.array([3]*8)) # Check behavior reported in gh-2503: assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): x = self.one def make_array(size, offset, strides): try: r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) except Exception as e: raise RuntimeError(e) r.strides = strides = strides*x.itemsize return r assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) assert_raises(ValueError, make_array, 4, 4, -2) assert_raises(ValueError, make_array, 4, 2, -1) assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. 
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): arr.strides = strides assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) # Test for offset calculations: x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] a.strides = 1 a[::2].strides = 2 def test_fill(self): for t in "?bhilqpBHILQPfdgFDGO": x = np.empty((3, 2, 1), t) y = np.empty((3, 2, 1), t) x.fill(1) y[...] = 1 assert_equal(x, y) def test_fill_max_uint64(self): x = np.empty((3, 2, 1), dtype=np.uint64) y = np.empty((3, 2, 1), dtype=np.uint64) value = 2**64 - 1 y[...] = value x.fill(value) assert_array_equal(x, y) def test_fill_struct_array(self): # Filling from a scalar x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8') x.fill(x[0]) assert_equal(x['f1'][1], x['f1'][0]) # Filling from a tuple that can be converted # to a scalar x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) x.fill((3.5, -2)) assert_array_equal(x['a'], [3.5, 3.5]) assert_array_equal(x['b'], [-2, -2]) class TestArrayConstruction(object): def test_array(self): d = np.ones(6) r = np.array([d, d]) assert_equal(r, np.ones((2, 6))) d = np.ones(6) tgt = np.ones((2, 6)) r = np.array([d, d]) assert_equal(r, tgt) tgt[1] = 2 r = np.array([d, d + 1]) assert_equal(r, tgt) d = np.ones(6) r = np.array([[d, d]]) assert_equal(r, np.ones((1, 2, 6))) d = np.ones(6) r = np.array([[d, d], [d, d]]) assert_equal(r, np.ones((2, 2, 6))) d = np.ones((6, 6)) r = np.array([d, d]) assert_equal(r, np.ones((2, 6, 6))) d = np.ones((6, )) r = np.array([[d, d + 1], d + 2]) assert_equal(len(r), 2) assert_equal(r[0], [d, d + 1]) assert_equal(r[1], d + 2) tgt = np.ones((2, 3), dtype=bool) tgt[0, 2] = False tgt[1, 0:2] = False r = np.array([[True, True, False], [False, False, True]]) assert_equal(r, tgt) r = np.array([[True, False], [True, False], [False, True]]) assert_equal(r, tgt.T) def test_array_empty(self): assert_raises(TypeError, np.array) def test_array_copy_false(self): d = np.array([1, 2, 3]) e = np.array(d, copy=False) d[1] = 3 assert_array_equal(e, [1, 3, 3]) e = np.array(d, copy=False, order='F') d[1] = 4 assert_array_equal(e, [1, 4, 3]) e[2] = 7 assert_array_equal(d, [1, 4, 7]) def test_array_copy_true(self): d = np.array([[1,2,3], [1, 2, 3]]) e = np.array(d, copy=True) d[0, 1] = 3 e[0, 2] = -7 assert_array_equal(e, [[1, 2, -7], [1, 2, 3]]) assert_array_equal(d, [[1, 3, 3], [1, 2, 3]]) e = np.array(d, copy=True, order='F') d[0, 1] = 5 e[0, 2] = 7 assert_array_equal(e, [[1, 3, 7], [1, 2, 3]]) assert_array_equal(d, [[1, 5, 3], [1,2,3]]) def test_array_cont(self): d = np.ones(10)[::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.ascontiguousarray(d).flags.f_contiguous) assert_(np.asfortranarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) d = np.ones((10, 10))[::2,::2] assert_(np.ascontiguousarray(d).flags.c_contiguous) assert_(np.asfortranarray(d).flags.f_contiguous) class TestAssignment(object): def test_assignment_broadcasting(self): a = np.arange(6).reshape(2, 3) # Broadcasting the input to the output a[...] = np.arange(3) assert_equal(a, [[0, 1, 2], [0, 1, 2]]) a[...] = np.arange(2).reshape(2, 1) assert_equal(a, [[0, 0, 0], [1, 1, 1]]) # For compatibility with <= 1.5, a limited version of broadcasting # the output to the input. 
# # This behavior is inconsistent with NumPy broadcasting # in general, because it only uses one of the two broadcasting # rules (adding a new "1" dimension to the left of the shape), # applied to the output instead of an input. In NumPy 2.0, this kind # of broadcasting assignment will likely be disallowed. a[...] = np.arange(6)[::-1].reshape(1, 2, 3) assert_equal(a, [[5, 4, 3], [2, 1, 0]]) # The other type of broadcasting would require a reduction operation. def assign(a, b): a[...] = b assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) def test_assignment_errors(self): # Address issue #2276 class C: pass a = np.zeros(1) def assign(v): a[0] = v assert_raises((AttributeError, TypeError), assign, C()) assert_raises(ValueError, assign, [1]) def test_unicode_assignment(self): # gh-5049 from numpy.core.numeric import set_string_function @contextmanager def inject_str(s): """ replace ndarray.__str__ temporarily """ set_string_function(lambda x: s, repr=False) try: yield finally: set_string_function(None, repr=False) a1d = np.array([u'test']) a0d = np.array(u'done') with inject_str(u'bad'): a1d[0] = a0d # previously this would invoke __str__ assert_equal(a1d[0], u'done') # this would crash for the same reason np.array([np.array(u'\xe5\xe4\xf6')]) def test_stringlike_empty_list(self): # gh-8902 u = np.array([u'done']) b = np.array([b'done']) class bad_sequence(object): def __getitem__(self): pass def __len__(self): raise RuntimeError assert_raises(ValueError, operator.setitem, u, 0, []) assert_raises(ValueError, operator.setitem, b, 0, []) assert_raises(ValueError, operator.setitem, u, 0, bad_sequence()) assert_raises(ValueError, operator.setitem, b, 0, bad_sequence()) def test_longdouble_assignment(self): # only relevant if longdouble is larger than float # we're looking for loss of precision # gh-8902 tinyb = np.nextafter(np.longdouble(0), 1) tinya = np.nextafter(np.longdouble(0), -1) tiny1d = np.array([tinya]) assert_equal(tiny1d[0], tinya) # scalar = scalar tiny1d[0] = tinyb assert_equal(tiny1d[0], tinyb) # 0d = scalar tiny1d[0, ...] = tinya assert_equal(tiny1d[0], tinya) # 0d = 0d tiny1d[0, ...] = tinyb[...] assert_equal(tiny1d[0], tinyb) # scalar = 0d tiny1d[0] = tinyb[...] assert_equal(tiny1d[0], tinyb) arr = np.array([np.array(tinya)]) assert_equal(arr[0], tinya) class TestDtypedescr(object): def test_construction(self): d1 = np.dtype('i4') assert_equal(d1, np.dtype(np.int32)) d2 = np.dtype('f8') assert_equal(d2, np.dtype(np.float64)) def test_byteorders(self): assert_(np.dtype('<i4') != np.dtype('>i4')) assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')])) class TestZeroRank(object): def setup(self): self.d = np.array(0), np.array('x', object) def test_ellipsis_subscript(self): a, b = self.d assert_equal(a[...], 0) assert_equal(b[...], 'x') assert_(a[...].base is a) # `a[...] is a` in numpy <1.9. assert_(b[...].base is b) # `b[...] is b` in numpy <1.9. def test_empty_subscript(self): a, b = self.d assert_equal(a[()], 0) assert_equal(b[()], 'x') assert_(type(a[()]) is a.dtype.type) assert_(type(b[()]) is str) def test_invalid_subscript(self): a, b = self.d assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[0], b) assert_raises(IndexError, lambda x: x[np.array([], int)], a) assert_raises(IndexError, lambda x: x[np.array([], int)], b) def test_ellipsis_subscript_assignment(self): a, b = self.d a[...] = 42 assert_equal(a, 42) b[...] 
= '' assert_equal(b.item(), '') def test_empty_subscript_assignment(self): a, b = self.d a[()] = 42 assert_equal(a, 42) b[()] = '' assert_equal(b.item(), '') def test_invalid_subscript_assignment(self): a, b = self.d def assign(x, i, v): x[i] = v assert_raises(IndexError, assign, a, 0, 42) assert_raises(IndexError, assign, b, 0, '') assert_raises(ValueError, assign, a, (), '') def test_newaxis(self): a, b = self.d assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): a, b = self.d def subscript(x, i): x[i] assert_raises(IndexError, subscript, a, (np.newaxis, 0)) assert_raises(IndexError, subscript, a, (np.newaxis,)*50) def test_constructor(self): x = np.ndarray(()) x[()] = 5 assert_equal(x[()], 5) y = np.ndarray((), buffer=x) y[()] = 6 assert_equal(x[()], 6) def test_output(self): x = np.array(2) assert_raises(ValueError, np.add, x, [1], x) class TestScalarIndexing(object): def setup(self): self.d = np.array([0, 1])[0] def test_ellipsis_subscript(self): a = self.d assert_equal(a[...], 0) assert_equal(a[...].shape, ()) def test_empty_subscript(self): a = self.d assert_equal(a[()], 0) assert_equal(a[()].shape, ()) def test_invalid_subscript(self): a = self.d assert_raises(IndexError, lambda x: x[0], a) assert_raises(IndexError, lambda x: x[np.array([], int)], a) def test_invalid_subscript_assignment(self): a = self.d def assign(x, i, v): x[i] = v assert_raises(TypeError, assign, a, 0, 42) def test_newaxis(self): a = self.d assert_equal(a[np.newaxis].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ...].shape, (1,)) assert_equal(a[..., np.newaxis].shape, (1,)) assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1)) assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1)) assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1)) assert_equal(a[(np.newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): a = self.d def subscript(x, i): x[i] assert_raises(IndexError, subscript, a, (np.newaxis, 0)) assert_raises(IndexError, subscript, a, (np.newaxis,)*50) def test_overlapping_assignment(self): # With positive strides a = np.arange(4) a[:-1] = a[1:] assert_equal(a, [1, 2, 3, 3]) a = np.arange(4) a[1:] = a[:-1] assert_equal(a, [0, 0, 1, 2]) # With positive and negative strides a = np.arange(4) a[:] = a[::-1] assert_equal(a, [3, 2, 1, 0]) a = np.arange(6).reshape(2, 3) a[::-1,:] = a[:, ::-1] assert_equal(a, [[5, 4, 3], [2, 1, 0]]) a = np.arange(6).reshape(2, 3) a[::-1, ::-1] = a[:, ::-1] assert_equal(a, [[3, 4, 5], [0, 1, 2]]) # With just one element overlapping a = np.arange(5) a[:3] = a[2:] assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[2:] = a[:3] assert_equal(a, [0, 1, 0, 1, 2]) a = np.arange(5) a[2::-1] = a[2:] assert_equal(a, [4, 3, 2, 3, 4]) a = np.arange(5) a[2:] = a[2::-1] assert_equal(a, [0, 1, 2, 1, 0]) a = np.arange(5) a[2::-1] = a[:1:-1] assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[:1:-1] = a[2::-1] assert_equal(a, [0, 1, 0, 1, 2]) class TestCreation(object): def test_from_attribute(self): class x(object): def __array__(self, dtype=None): pass assert_raises(ValueError, np.array, x()) def test_from_string(self): types = np.typecodes['AllInteger'] + 
np.typecodes['Float'] nstr = ['123', '123'] result = np.array([123, 123], dtype=int) for type in types: msg = 'String conversion for %s' % type assert_equal(np.array(nstr, dtype=type), result, err_msg=msg) def test_void(self): arr = np.array([], dtype='V') assert_equal(arr.dtype.kind, 'V') def test_too_big_error(self): # 45341 is the smallest integer greater than sqrt(2**31 - 1). # 3037000500 is the smallest integer greater than sqrt(2**63 - 1). # We want to make sure that the square byte array with those dimensions # is too big on 32 or 64 bit systems respectively. if np.iinfo('intp').max == 2**31 - 1: shape = (46341, 46341) elif np.iinfo('intp').max == 2**63 - 1: shape = (3037000500, 3037000500) else: return assert_raises(ValueError, np.empty, shape, dtype=np.int8) assert_raises(ValueError, np.zeros, shape, dtype=np.int8) assert_raises(ValueError, np.ones, shape, dtype=np.int8) def test_zeros(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: d = np.zeros((13,), dtype=dt) assert_equal(np.count_nonzero(d), 0) # true for ieee floats assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='(2,4)i4') assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='4i4') assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='(2,4)i4, (2,4)i4') assert_equal(np.count_nonzero(d), 0) @dec.slow def test_zeros_big(self): # test big array as they might be allocated different by the system types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: d = np.zeros((30 * 1024**2,), dtype=dt) assert_(not d.any()) # This test can fail on 32-bit systems due to insufficient # contiguous memory. Deallocating the previous array increases the # chance of success. 
del(d) def test_zeros_obj(self): # test initialization from PyLong(0) d = np.zeros((13,), dtype=object) assert_array_equal(d, [0] * 13) assert_equal(np.count_nonzero(d), 0) def test_zeros_obj_obj(self): d = np.zeros(10, dtype=[('k', object, 2)]) assert_array_equal(d['k'], 0) def test_zeros_like_like_zeros(self): # test zeros_like returns the same as zeros for c in np.typecodes['All']: if c == 'V': continue d = np.zeros((3,3), dtype=c) assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) # explicitly check some special cases d = np.zeros((3,3), dtype='S5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='U5') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='<i4') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='>i4') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='<M8[s]') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='>M8[s]') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3,3), dtype='f4,f4') assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) def test_empty_unicode(self): # don't throw decode errors on garbage memory for i in range(5, 100, 5): d = np.empty(i, dtype='U') str(d) def test_sequence_non_homogenous(self): assert_equal(np.array([4, 2**80]).dtype, object) assert_equal(np.array([4, 2**80, 4]).dtype, object) assert_equal(np.array([2**80, 4]).dtype, object) assert_equal(np.array([2**80] * 3).dtype, object) assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex) assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex) assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex) @dec.skipif(sys.version_info[0] >= 3) def test_sequence_long(self): assert_equal(np.array([long(4), long(4)]).dtype, np.long) assert_equal(np.array([long(4), 2**80]).dtype, object) assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object) assert_equal(np.array([2**80, long(4)]).dtype, object) def test_non_sequence_sequence(self): """Should not segfault. Class Fail breaks the sequence protocol for new style classes, i.e., those derived from object. Class Map is a mapping type indicated by raising a ValueError. At some point we may raise a warning instead of an error in the Fail case. """ class Fail(object): def __len__(self): return 1 def __getitem__(self, index): raise ValueError() class Map(object): def __len__(self): return 1 def __getitem__(self, index): raise KeyError() a = np.array([Map()]) assert_(a.shape == (1,)) assert_(a.dtype == np.dtype(object)) assert_raises(ValueError, np.array, [Fail()]) def test_no_len_object_type(self): # gh-5100, want object array from iterable object without len() class Point2: def __init__(self): pass def __getitem__(self, ind): if ind in [0, 1]: return ind else: raise IndexError() d = np.array([Point2(), Point2(), Point2()]) assert_equal(d.dtype, np.dtype(object)) def test_false_len_sequence(self): # gh-7264, segfault for this example class C: def __getitem__(self, i): raise IndexError def __len__(self): return 42 assert_raises(ValueError, np.array, C()) # segfault? 
def test_failed_len_sequence(self): # gh-7393 class A(object): def __init__(self, data): self._data = data def __getitem__(self, item): return type(self)(self._data[item]) def __len__(self): return len(self._data) # len(d) should give 3, but len(d[0]) will fail d = A([1,2,3]) assert_equal(len(np.array(d)), 3) def test_array_too_big(self): # Test that array creation succeeds for arrays addressable by intp # on the byte level and fails for too large arrays. buf = np.zeros(100) max_bytes = np.iinfo(np.intp).max for dtype in ["intp", "S20", "b"]: dtype = np.dtype(dtype) itemsize = dtype.itemsize np.ndarray(buffer=buf, strides=(0,), shape=(max_bytes//itemsize,), dtype=dtype) assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,), shape=(max_bytes//itemsize + 1,), dtype=dtype) class TestStructured(object): def test_subarray_field_access(self): a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) a['a'] = np.arange(60).reshape(3, 5, 2, 2) # Since the subarray is always in C-order, a transpose # does not swap the subarray: assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3)) # In Fortran order, the subarray gets appended # like in all other cases, not prepended as a special case b = a.copy(order='F') assert_equal(a['a'].shape, b['a'].shape) assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) def test_subarray_comparison(self): # Check that comparisons between record arrays with # multi-dimensional field types work properly a = np.rec.fromrecords( [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) b = a.copy() assert_equal(a == b, [True, True]) assert_equal(a != b, [False, False]) b[1].b = 'c' assert_equal(a == b, [True, False]) assert_equal(a != b, [False, True]) for i in range(3): b[0].a = a[0].a b[0].a[i] = 5 assert_equal(a == b, [False, False]) assert_equal(a != b, [True, True]) for i in range(2): for j in range(2): b = a.copy() b[0].c[i, j] = 10 assert_equal(a == b, [False, True]) assert_equal(a != b, [True, False]) # Check that broadcasting with a subarray works a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) assert_equal(a == b, [[True, True, False], [False, False, True]]) assert_equal(b == a, [[True, True, False], [False, False, True]]) a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) assert_equal(a == b, [[True, True, False], [False, False, True]]) assert_equal(b == a, [[True, True, False], [False, False, True]]) a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a == b, [[True, False, False], [False, False, True]]) assert_equal(b == a, [[True, False, False], [False, False, True]]) # Check that broadcasting Fortran-style arrays with a subarray work a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a == b, [[True, False, False], [False, False, True]]) assert_equal(b == a, [[True, False, False], [False, False, True]]) # Check that incompatible sub-array shapes don't result to broadcasting x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) # This comparison invokes deprecated behaviour, and will probably # start raising an error eventually. 
What we really care about in this # test is just that it doesn't return True. with suppress_warnings() as sup: sup.filter(FutureWarning, "elementwise == comparison failed") assert_equal(x == y, False) x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) # This comparison invokes deprecated behaviour, and will probably # start raising an error eventually. What we really care about in this # test is just that it doesn't return True. with suppress_warnings() as sup: sup.filter(FutureWarning, "elementwise == comparison failed") assert_equal(x == y, False) # Check that structured arrays that are different only in # byte-order work a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')]) b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')]) assert_equal(a == b, [False, True]) def test_casting(self): # Check that casting a structured array to change its byte order # works a = np.array([(1,)], dtype=[('a', '<i4')]) assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe')) b = a.astype([('a', '>i4')]) assert_equal(b, a.byteswap().newbyteorder()) assert_equal(a['a'][0], b['a'][0]) # Check that equality comparison works on structured arrays if # they are 'equiv'-castable a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')]) b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')]) assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) assert_equal(a == b, [True, True]) # Check that 'equiv' casting can reorder fields and change byte # order # New in 1.12: This behavior changes in 1.13, test for dep warning assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) with assert_warns(FutureWarning): c = a.astype(b.dtype, casting='equiv') assert_equal(a == c, [True, True]) # Check that 'safe' casting can change byte order and up-cast # fields t = [('a', '<i8'), ('b', '>f8')] assert_(np.can_cast(a.dtype, t, casting='safe')) c = a.astype(t, casting='safe') assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), [True, True]) # Check that 'same_kind' casting can change byte order and # change field widths within a "kind" t = [('a', '<i4'), ('b', '>f4')] assert_(np.can_cast(a.dtype, t, casting='same_kind')) c = a.astype(t, casting='same_kind') assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)), [True, True]) # Check that casting fails if the casting rule should fail on # any of the fields t = [('a', '>i8'), ('b', '<f4')] assert_(not np.can_cast(a.dtype, t, casting='safe')) assert_raises(TypeError, a.astype, t, casting='safe') t = [('a', '>i2'), ('b', '<f8')] assert_(not np.can_cast(a.dtype, t, casting='equiv')) assert_raises(TypeError, a.astype, t, casting='equiv') t = [('a', '>i8'), ('b', '<i2')] assert_(not np.can_cast(a.dtype, t, casting='same_kind')) assert_raises(TypeError, a.astype, t, casting='same_kind') assert_(not np.can_cast(a.dtype, b.dtype, casting='no')) assert_raises(TypeError, a.astype, b.dtype, casting='no') # Check that non-'unsafe' casting can't change the set of field names for casting in ['no', 'safe', 'equiv', 'same_kind']: t = [('a', '>i4')] assert_(not np.can_cast(a.dtype, t, casting=casting)) t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')] assert_(not np.can_cast(a.dtype, t, casting=casting)) def test_objview(self): # https://github.com/numpy/numpy/issues/3286 a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')]) a[['a', 'b']] # TypeError? 
# https://github.com/numpy/numpy/issues/3253 dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')]) dat2[['B', 'A']] # TypeError? def test_setfield(self): # https://github.com/numpy/numpy/issues/3126 struct_dt = np.dtype([('elem', 'i4', 5),]) dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)]) x = np.zeros(1, dt) x[0]['field'] = np.ones(10, dtype='i4') x[0]['struct'] = np.ones(1, dtype=struct_dt) assert_equal(x[0]['field'], np.ones(10, dtype='i4')) def test_setfield_object(self): # make sure object field assignment with ndarray value # on void scalar mimics setitem behavior b = np.zeros(1, dtype=[('x', 'O')]) # next line should work identically to b['x'][0] = np.arange(3) b[0]['x'] = np.arange(3) assert_equal(b[0]['x'], np.arange(3)) # check that broadcasting check still works c = np.zeros(1, dtype=[('x', 'O', 5)]) def testassign(): c[0]['x'] = np.arange(3) assert_raises(ValueError, testassign) def test_zero_width_string(self): # Test for PR #6430 / issues #473, #4955, #2585 dt = np.dtype([('I', int), ('S', 'S0')]) x = np.zeros(4, dtype=dt) assert_equal(x['S'], [b'', b'', b'', b'']) assert_equal(x['S'].itemsize, 0) x['S'] = ['a', 'b', 'c', 'd'] assert_equal(x['S'], [b'', b'', b'', b'']) assert_equal(x['I'], [0, 0, 0, 0]) # Variation on test case from #4955 x['S'][x['I'] == 0] = 'hello' assert_equal(x['S'], [b'', b'', b'', b'']) assert_equal(x['I'], [0, 0, 0, 0]) # Variation on test case from #2585 x['S'] = 'A' assert_equal(x['S'], [b'', b'', b'', b'']) assert_equal(x['I'], [0, 0, 0, 0]) # Allow zero-width dtypes in ndarray constructor y = np.ndarray(4, dtype=x['S'].dtype) assert_equal(y.itemsize, 0) assert_equal(x['S'], y) # More tests for indexing an array with zero-width fields assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'), ('b', 'u1')])['a'].itemsize, 0) assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0) assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0) xx = x['S'].reshape((2, 2)) assert_equal(xx.itemsize, 0) assert_equal(xx, [[b'', b''], [b'', b'']]) # check for no uninitialized memory due to viewing S0 array assert_equal(xx[:].dtype, xx.dtype) assert_array_equal(eval(repr(xx), dict(array=np.array)), xx) b = io.BytesIO() np.save(b, xx) b.seek(0) yy = np.load(b) assert_equal(yy.itemsize, 0) assert_equal(xx, yy) with temppath(suffix='.npy') as tmp: np.save(tmp, xx) yy = np.load(tmp) assert_equal(yy.itemsize, 0) assert_equal(xx, yy) def test_base_attr(self): a = np.zeros(3, dtype='i4,f4') b = a[0] assert_(b.base is a) class TestBool(object): def test_test_interning(self): a0 = np.bool_(0) b0 = np.bool_(False) assert_(a0 is b0) a1 = np.bool_(1) b1 = np.bool_(True) assert_(a1 is b1) assert_(np.array([True])[0] is a1) assert_(np.array(True)[()] is a1) def test_sum(self): d = np.ones(101, dtype=bool) assert_equal(d.sum(), d.size) assert_equal(d[::2].sum(), d[::2].size) assert_equal(d[::-2].sum(), d[::-2].size) d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) assert_equal(d.sum(), d.size) assert_equal(d[::2].sum(), d[::2].size) assert_equal(d[::-2].sum(), d[::-2].size) def check_count_nonzero(self, power, length): powers = [2 ** i for i in range(length)] for i in range(2**power): l = [(i & x) != 0 for x in powers] a = np.array(l, dtype=bool) c = builtins.sum(l) assert_equal(np.count_nonzero(a), c) av = a.view(np.uint8) av *= 3 assert_equal(np.count_nonzero(a), c) av *= 4 assert_equal(np.count_nonzero(a), c) av[av != 0] = 0xFF assert_equal(np.count_nonzero(a), c) def test_count_nonzero(self): # check all 12 bit combinations in a length 17 array # covers most cases of the 16 byte 
unrolled code self.check_count_nonzero(12, 17) @dec.slow def test_count_nonzero_all(self): # check all combinations in a length 17 array # covers all cases of the 16 byte unrolled code self.check_count_nonzero(17, 17) def test_count_nonzero_unaligned(self): # prevent mistakes as e.g. gh-4060 for o in range(7): a = np.zeros((18,), dtype=bool)[o+1:] a[:o] = True assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) a = np.ones((18,), dtype=bool)[o+1:] a[:o] = False assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) class TestMethods(object): def test_compress(self): tgt = [[5, 6, 7, 8, 9]] arr = np.arange(10).reshape(2, 5) out = arr.compress([0, 1], axis=0) assert_equal(out, tgt) tgt = [[1, 3], [6, 8]] out = arr.compress([0, 1, 0, 1, 0], axis=1) assert_equal(out, tgt) tgt = [[1], [6]] arr = np.arange(10).reshape(2, 5) out = arr.compress([0, 1], axis=1) assert_equal(out, tgt) arr = np.arange(10).reshape(2, 5) out = arr.compress([0, 1]) assert_equal(out, 1) def test_choose(self): x = 2*np.ones((3,), dtype=int) y = 3*np.ones((3,), dtype=int) x2 = 2*np.ones((2, 3), dtype=int) y2 = 3*np.ones((2, 3), dtype=int) ind = np.array([0, 0, 1]) A = ind.choose((x, y)) assert_equal(A, [2, 2, 3]) A = ind.choose((x2, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) A = ind.choose((x, y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_prod(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: assert_raises(ArithmeticError, a.prod) assert_raises(ArithmeticError, a2.prod, axis=1) else: assert_equal(a.prod(axis=0), 26400) assert_array_equal(a2.prod(axis=0), np.array([50, 36, 84, 180], ctype)) assert_array_equal(a2.prod(axis=-1), np.array([24, 1890, 600], ctype)) def test_repeat(self): m = np.array([1, 2, 3, 4, 5, 6]) m_rect = m.reshape((2, 3)) A = m.repeat([1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) A = m.repeat(2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) A = m_rect.repeat([2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) A = m_rect.repeat([1, 3, 2], axis=1) assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) A = m_rect.repeat(2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) A = m_rect.repeat(2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) def test_reshape(self): arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] assert_equal(arr.reshape(2, 6), tgt) tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] assert_equal(arr.reshape(3, 4), tgt) tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] assert_equal(arr.reshape((3, 4), order='F'), tgt) tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] assert_equal(arr.T.reshape((3, 4), order='C'), tgt) def test_round(self): def check_round(arr, expected, *round_args): assert_equal(arr.round(*round_args), expected) # With output array out = np.zeros_like(arr) res = arr.round(*round_args, out=out) assert_equal(out, expected) assert_equal(out, res) check_round(np.array([1.2, 1.5]), [1, 2]) check_round(np.array(1.5), 2) check_round(np.array([12.2, 15.5]), [10, 20], -1) check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) # Complex rounding check_round(np.array([4.5 + 1.5j]), [4 + 2j]) check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) def test_squeeze(self): a = np.array([[[1], [2], 
[3]]]) assert_equal(a.squeeze(), [1, 2, 3]) assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) assert_raises(ValueError, a.squeeze, axis=(1,)) assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) def test_transpose(self): a = np.array([[1, 2], [3, 4]]) assert_equal(a.transpose(), [[1, 3], [2, 4]]) assert_raises(ValueError, lambda: a.transpose(0)) assert_raises(ValueError, lambda: a.transpose(0, 0)) assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) def test_sort(self): # test ordering for floats and complex containing nans. It is only # necessary to check the less-than comparison, so sorts that # only follow the insertion sort path are sufficient. We only # test doubles and complex doubles as the logic is the same. # check doubles msg = "Test real sort order with nans" a = np.array([np.nan, 1, 0]) b = np.sort(a) assert_equal(b, a[::-1], msg) # check complex msg = "Test complex sort order with nans" a = np.zeros(9, dtype=np.complex128) a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] b = np.sort(a) assert_equal(b, a[::-1], msg) # all c scalar sorts use the same code with different types # so it suffices to run a quick check with one type. The number # of sorted items must be greater than ~50 to check the actual # algorithm because quick and merge sort fall over to insertion # sort for small arrays. a = np.arange(101) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "scalar sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test complex sorts. These use the same code as the scalars # but the compare function differs. ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h']: msg = "complex sort, real part == 1, kind=%s" % kind c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy() c.sort(kind=kind) assert_equal(c, ai, msg) ai = a + 1j bi = b + 1j for kind in ['q', 'm', 'h']: msg = "complex sort, imag part == 1, kind=%s" % kind c = ai.copy() c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy() c.sort(kind=kind) assert_equal(c, ai, msg) # test sorting of complex arrays requiring byte-swapping, gh-5441 for endianess in '<>': for dt in np.typecodes['Complex']: arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt) c = arr.copy() c.sort() msg = 'byte-swapped complex sort, dtype={0}'.format(dt) assert_equal(c, arr, msg) # test string sorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "string sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test unicode sorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "unicode sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test object array sorts. a = np.empty((101,), dtype=object) a[:] = list(range(101)) b = a[::-1] for kind in ['q', 'h', 'm']: msg = "object sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test record array sorts. 
dt = np.dtype([('f', float), ('i', int)]) a = np.array([(i, i) for i in range(101)], dtype=dt) b = a[::-1] for kind in ['q', 'h', 'm']: msg = "object sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test datetime64 sorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] for kind in ['q', 'h', 'm']: msg = "datetime64 sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # test timedelta64 sorts. a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] for kind in ['q', 'h', 'm']: msg = "timedelta64 sort, kind=%s" % kind c = a.copy() c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy() c.sort(kind=kind) assert_equal(c, a, msg) # check axis handling. This should be the same for all type # specific sorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 0], [3, 2]]) c = np.array([[2, 3], [0, 1]]) d = a.copy() d.sort(axis=0) assert_equal(d, b, "test sort with axis=0") d = a.copy() d.sort(axis=1) assert_equal(d, c, "test sort with axis=1") d = a.copy() d.sort() assert_equal(d, c, "test sort with default axis") # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array sort with axis={0}'.format(axis) assert_equal(np.sort(a, axis=axis), a, msg) msg = 'test empty array sort with axis=None' assert_equal(np.sort(a, axis=None), a.ravel(), msg) # test generic class with bogus ordering, # should not segfault. class Boom(object): def __lt__(self, other): return True a = np.array([Boom()]*100, dtype=object) for kind in ['q', 'm', 'h']: msg = "bogus comparison object sort, kind=%s" % kind c.sort(kind=kind) def test_void_sort(self): # gh-8210 - previously segfaulted for i in range(4): arr = np.empty(1000, 'V4') arr[::-1].sort() dt = np.dtype([('val', 'i4', (1,))]) for i in range(4): arr = np.empty(1000, dt) arr[::-1].sort() def test_sort_raises(self): #gh-9404 arr = np.array([0, datetime.now(), 1], dtype=object) for kind in ['q', 'm', 'h']: assert_raises(TypeError, arr.sort, kind=kind) #gh-3879 class Raiser(object): def raises_anything(*args, **kwargs): raise TypeError("SOMETHING ERRORED") __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) np.random.shuffle(arr) for kind in ['q', 'm', 'h']: assert_raises(TypeError, arr.sort, kind=kind) def test_sort_degraded(self): # test degraded dataset would take minutes to run with normal qsort d = np.arange(1000000) do = d.copy() x = d # create a median of 3 killer where each median is the sorted second # last element of the quicksort partition while x.size > 3: mid = x.size // 2 x[mid], x[-2] = x[-2], x[mid] x = x[:-2] assert_equal(np.sort(d), do) assert_equal(d[np.argsort(d)], do) def test_copy(self): def assert_fortran(arr): assert_(arr.flags.fortran) assert_(arr.flags.f_contiguous) assert_(not arr.flags.c_contiguous) def assert_c(arr): assert_(not arr.flags.fortran) assert_(not arr.flags.f_contiguous) assert_(arr.flags.c_contiguous) a = np.empty((2, 2), order='F') # Test copying a Fortran array assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_fortran(a.copy('A')) # Now test starting with a C array. 
a = np.empty((2, 2), order='C') assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_c(a.copy('A')) def test_sort_order(self): # Test sorting an array with fields x1 = np.array([21, 32, 14]) x2 = np.array(['my', 'first', 'name']) x3 = np.array([3.1, 4.5, 6.2]) r = np.rec.fromarrays([x1, x2, x3], names='id,word,number') r.sort(order=['id']) assert_equal(r.id, np.array([14, 21, 32])) assert_equal(r.word, np.array(['name', 'my', 'first'])) assert_equal(r.number, np.array([6.2, 3.1, 4.5])) r.sort(order=['word']) assert_equal(r.id, np.array([32, 21, 14])) assert_equal(r.word, np.array(['first', 'my', 'name'])) assert_equal(r.number, np.array([4.5, 3.1, 6.2])) r.sort(order=['number']) assert_equal(r.id, np.array([21, 32, 14])) assert_equal(r.word, np.array(['my', 'first', 'name'])) assert_equal(r.number, np.array([3.1, 4.5, 6.2])) assert_raises_regex(ValueError, 'duplicate', lambda: r.sort(order=['id', 'id'])) if sys.byteorder == 'little': strtype = '>i2' else: strtype = '<i2' mydtype = [('name', strchar + '5'), ('col2', strtype)] r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)], dtype=mydtype) r.sort(order='col2') assert_equal(r['col2'], [1, 3, 255, 258]) assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)) def test_argsort(self): # all c scalar argsorts use the same code with different types # so it suffices to run a quick check with one type. The number # of sorted items must be greater than ~50 to check the actual # algorithm because quick and merge sort fall over to insertion # sort for small arrays. a = np.arange(101) b = a[::-1].copy() for kind in ['q', 'm', 'h']: msg = "scalar argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), a, msg) assert_equal(b.copy().argsort(kind=kind), b, msg) # test complex argsorts. These use the same code as the scalars # but the compare function differs. ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h']: msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) ai = a + 1j bi = b + 1j for kind in ['q', 'm', 'h']: msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) # test argsort of complex arrays requiring byte-swapping, gh-5441 for endianess in '<>': for dt in np.typecodes['Complex']: arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt) msg = 'byte-swapped complex argsort, dtype={0}'.format(dt) assert_equal(arr.argsort(), np.arange(len(arr), dtype=np.intp), msg) # test string argsorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "string argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test unicode argsorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "unicode argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test object array argsorts. 
a = np.empty((101,), dtype=object) a[:] = list(range(101)) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "object argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test structured array argsorts. dt = np.dtype([('f', float), ('i', int)]) a = np.array([(i, i) for i in range(101)], dtype=dt) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h']: msg = "structured array argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test datetime64 argsorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: msg = "datetime64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test timedelta64 argsorts. a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm']: msg = "timedelta64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # check axis handling. This should be the same for all type # specific argsorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 1], [0, 0]]) c = np.array([[1, 0], [1, 0]]) assert_equal(a.copy().argsort(axis=0), b) assert_equal(a.copy().argsort(axis=1), c) assert_equal(a.copy().argsort(), c) # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array argsort with axis={0}'.format(axis) assert_equal(np.argsort(a, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argsort with axis=None' assert_equal(np.argsort(a, axis=None), np.zeros_like(a.ravel(), dtype=np.intp), msg) # check that stable argsorts are stable r = np.arange(100) # scalars a = np.zeros(100) assert_equal(a.argsort(kind='m'), r) # complex a = np.zeros(100, dtype=complex) assert_equal(a.argsort(kind='m'), r) # string a = np.array(['aaaaaaaaa' for i in range(100)]) assert_equal(a.argsort(kind='m'), r) # unicode a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode) assert_equal(a.argsort(kind='m'), r) def test_sort_unicode_kind(self): d = np.arange(10) k = b'\xc3\xa4'.decode("UTF8") assert_raises(ValueError, d.sort, kind=k) assert_raises(ValueError, d.argsort, kind=k) def test_searchsorted(self): # test for floats and complex containing nans. The logic is the # same for all float types so only test double types for now. # The search sorted routines use the compare functions for the # array type, so this checks if that is consistent with the sort # order. 
# check double a = np.array([0, 1, np.nan]) msg = "Test real searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(3), msg) msg = "Test real searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 4), msg) # check double complex a = np.zeros(9, dtype=np.complex128) a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] msg = "Test complex searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(9), msg) msg = "Test complex searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 10), msg) msg = "Test searchsorted with little endian, side='l'" a = np.array([0, 128], dtype='<i4') b = a.searchsorted(np.array(128, dtype='<i4')) assert_equal(b, 1, msg) msg = "Test searchsorted with big endian, side='l'" a = np.array([0, 128], dtype='>i4') b = a.searchsorted(np.array(128, dtype='>i4')) assert_equal(b, 1, msg) # Check 0 elements a = np.ones(0) b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 0]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 0, 0]) a = np.ones(1) # Check 1 element b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 1]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 1, 1]) # Check all elements equal a = np.ones(2) b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 2]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 2, 2]) # Test searching unaligned array a = np.arange(10) aligned = np.empty(a.itemsize * a.size + 1, 'uint8') unaligned = aligned[1:].view(a.dtype) unaligned[:] = a # Test searching unaligned array b = unaligned.searchsorted(a, 'l') assert_equal(b, a) b = unaligned.searchsorted(a, 'r') assert_equal(b, a + 1) # Test searching for unaligned keys b = a.searchsorted(unaligned, 'l') assert_equal(b, a) b = a.searchsorted(unaligned, 'r') assert_equal(b, a + 1) # Test smart resetting of binsearch indices a = np.arange(5) b = a.searchsorted([6, 5, 4], 'l') assert_equal(b, [5, 5, 4]) b = a.searchsorted([6, 5, 4], 'r') assert_equal(b, [5, 5, 5]) # Test all type specific binary search functions types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'], np.typecodes['Datetime'], '?O')) for dt in types: if dt == 'M': dt = 'M8[D]' if dt == '?': a = np.arange(2, dtype=dt) out = np.arange(2) else: a = np.arange(0, 5, dtype=dt) out = np.arange(5) b = a.searchsorted(a, 'l') assert_equal(b, out) b = a.searchsorted(a, 'r') assert_equal(b, out + 1) def test_searchsorted_unicode(self): # Test searchsorted on unicode strings. # 1.6.1 contained a string length miscalculation in # arraytypes.c.src:UNICODE_compare() which manifested as # incorrect/inconsistent results from searchsorted. 
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'], dtype=np.unicode) ind = np.arange(len(a)) assert_equal([a.searchsorted(v, 'left') for v in a], ind) assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1) assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind) assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1) def test_searchsorted_with_sorter(self): a = np.array([5, 2, 1, 3, 4]) s = np.argsort(a) assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3))) assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6]) # bounds check assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3]) a = np.random.rand(300) s = a.argsort() b = np.sort(a) k = np.linspace(0, 1, 20) assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s)) a = np.array([0, 1, 2, 3, 5]*20) s = a.argsort() k = [0, 1, 2, 3, 5] expected = [0, 20, 40, 60, 80] assert_equal(a.searchsorted(k, side='l', sorter=s), expected) expected = [20, 40, 60, 80, 100] assert_equal(a.searchsorted(k, side='r', sorter=s), expected) # Test searching unaligned array keys = np.arange(10) a = keys.copy() np.random.shuffle(s) s = a.argsort() aligned = np.empty(a.itemsize * a.size + 1, 'uint8') unaligned = aligned[1:].view(a.dtype) # Test searching unaligned array unaligned[:] = a b = unaligned.searchsorted(keys, 'l', s) assert_equal(b, keys) b = unaligned.searchsorted(keys, 'r', s) assert_equal(b, keys + 1) # Test searching for unaligned keys unaligned[:] = keys b = a.searchsorted(unaligned, 'l', s) assert_equal(b, keys) b = a.searchsorted(unaligned, 'r', s) assert_equal(b, keys + 1) # Test all type specific indirect binary search functions types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'], np.typecodes['Datetime'], '?O')) for dt in types: if dt == 'M': dt = 'M8[D]' if dt == '?': a = np.array([1, 0], dtype=dt) # We want the sorter array to be of a type that is different # from np.intp in all platforms, to check for #4698 s = np.array([1, 0], dtype=np.int16) out = np.array([1, 0]) else: a = np.array([3, 4, 1, 2, 0], dtype=dt) # We want the sorter array to be of a type that is different # from np.intp in all platforms, to check for #4698 s = np.array([4, 2, 3, 0, 1], dtype=np.int16) out = np.array([3, 4, 1, 2, 0], dtype=np.intp) b = a.searchsorted(a, 'l', s) assert_equal(b, out) b = a.searchsorted(a, 'r', s) assert_equal(b, out + 1) # Test non-contiguous sorter array a = np.array([3, 4, 1, 2, 0]) srt = np.empty((10,), dtype=np.intp) srt[1::2] = -1 srt[::2] = [4, 2, 3, 0, 1] s = srt[::2] out = np.array([3, 4, 1, 2, 0], dtype=np.intp) b = a.searchsorted(a, 'l', s) assert_equal(b, out) b = 
a.searchsorted(a, 'r', s)
        assert_equal(b, out + 1)

    def test_searchsorted_return_type(self):
        # Functions returning indices should always return base ndarrays
        class A(np.ndarray):
            pass
        a = np.arange(5).view(A)
        b = np.arange(1, 3).view(A)
        s = np.arange(5).view(A)
        assert_(not isinstance(a.searchsorted(b, 'l'), A))
        assert_(not isinstance(a.searchsorted(b, 'r'), A))
        assert_(not isinstance(a.searchsorted(b, 'l', s), A))
        assert_(not isinstance(a.searchsorted(b, 'r', s), A))

    def test_argpartition_out_of_range(self):
        # Test out of range values in kth raise an error, gh-5469
        d = np.arange(10)
        assert_raises(ValueError, d.argpartition, 10)
        assert_raises(ValueError, d.argpartition, -11)
        # Test also for generic type argpartition, which uses sorting
        # and used to not bound check kth
        d_obj = np.arange(10, dtype=object)
        assert_raises(ValueError, d_obj.argpartition, 10)
        assert_raises(ValueError, d_obj.argpartition, -11)

    def test_partition_out_of_range(self):
        # Test out of range values in kth raise an error, gh-5469
        d = np.arange(10)
        assert_raises(ValueError, d.partition, 10)
        assert_raises(ValueError, d.partition, -11)
        # Test also for generic type partition, which uses sorting
        # and used to not bound check kth
        d_obj = np.arange(10, dtype=object)
        assert_raises(ValueError, d_obj.partition, 10)
        assert_raises(ValueError, d_obj.partition, -11)

    def test_argpartition_integer(self):
        # Test non-integer values in kth raise an error
        d = np.arange(10)
        assert_raises(TypeError, d.argpartition, 9.)
        # Test also for generic type argpartition, which uses sorting
        # and used to not bound check kth
        d_obj = np.arange(10, dtype=object)
        assert_raises(TypeError, d_obj.argpartition, 9.)

    def test_partition_integer(self):
        # Test non-integer values in kth raise an error
        d = np.arange(10)
        assert_raises(TypeError, d.partition, 9.)
        # Test also for generic type partition, which uses sorting
        # and used to not bound check kth
        d_obj = np.arange(10, dtype=object)
        assert_raises(TypeError, d_obj.partition, 9.)
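
    def test_partition_contract_sketch(self):
        # A minimal illustrative sketch of the partition contract that the
        # tests below exercise (hypothetical example values, assumes only
        # NumPy): after np.partition(d, kth) the element at index kth is the
        # one a full sort would place there, everything before it compares
        # <= to it and everything after compares >=.
        d = np.array([7, 1, 5, 3, 9])
        p = np.partition(d, 2)
        assert_equal(p[2], np.sort(d)[2])  # kth element is in sorted position
        assert_((p[:2] <= p[2]).all())     # left side is not greater
        assert_((p[3:] >= p[2]).all())     # right side is not smaller
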
def test_partition_empty_array(self): # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array partition with axis={0}'.format(axis) assert_equal(np.partition(a, 0, axis=axis), a, msg) msg = 'test empty array partition with axis=None' assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg) def test_argpartition_empty_array(self): # check axis handling for multidimensional empty arrays a = np.array([]) a.shape = (3, 2, 1, 0) for axis in range(-a.ndim, a.ndim): msg = 'test empty array argpartition with axis={0}'.format(axis) assert_equal(np.partition(a, 0, axis=axis), np.zeros_like(a, dtype=np.intp), msg) msg = 'test empty array argpartition with axis=None' assert_equal(np.partition(a, 0, axis=None), np.zeros_like(a.ravel(), dtype=np.intp), msg) def test_partition(self): d = np.arange(10) assert_raises(TypeError, np.partition, d, 2, kind=1) assert_raises(ValueError, np.partition, d, 2, kind="nonsense") assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense") assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense") assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense") for k in ("introselect",): d = np.array([]) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(np.argpartition(d, 0, kind=k), d) d = np.ones(1) assert_array_equal(np.partition(d, 0, kind=k)[0], d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # kth not modified kth = np.array([30, 15, 5]) okth = kth.copy() np.partition(np.arange(40), kth) assert_array_equal(kth, okth) for r in ([2, 1], [1, 2], [1, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) assert_array_equal(d[np.argpartition(d, 2, kind=k)], np.partition(d, 2, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.ones(50) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # sorted d = np.arange(49) assert_equal(np.partition(d, 5, kind=k)[5], 5) assert_equal(np.partition(d, 15, kind=k)[15], 15) assert_array_equal(d[np.argpartition(d, 5, kind=k)], np.partition(d, 5, kind=k)) assert_array_equal(d[np.argpartition(d, 15, kind=k)], np.partition(d, 15, kind=k)) # rsorted d = np.arange(47)[::-1] assert_equal(np.partition(d, 6, kind=k)[6], 6) assert_equal(np.partition(d, 16, kind=k)[16], 16) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) assert_array_equal(np.partition(d, -6, kind=k), np.partition(d, 41, kind=k)) assert_array_equal(np.partition(d, -16, kind=k), 
np.partition(d, 31, kind=k)) assert_array_equal(d[np.argpartition(d, -6, kind=k)], np.partition(d, 41, kind=k)) # median of 3 killer, O(n^2) on pure median 3 pivot quickselect # exercises the median of median of 5 code used to keep O(n) d = np.arange(1000000) x = np.roll(d, d.size // 2) mid = x.size // 2 + 1 assert_equal(np.partition(x, mid)[mid], mid) d = np.arange(1000001) x = np.roll(d, d.size // 2 + 1) mid = x.size // 2 + 1 assert_equal(np.partition(x, mid)[mid], mid) # max d = np.ones(10) d[1] = 4 assert_equal(np.partition(d, (2, -1))[-1], 4) assert_equal(np.partition(d, (2, -1))[2], 1) assert_equal(d[np.argpartition(d, (2, -1))][-1], 4) assert_equal(d[np.argpartition(d, (2, -1))][2], 1) d[1] = np.nan assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1])) assert_(np.isnan(np.partition(d, (2, -1))[-1])) # equal elements d = np.arange(47) % 7 tgt = np.sort(np.arange(47) % 7) np.random.shuffle(d) for i in range(d.size): assert_equal(np.partition(d, i, kind=k)[i], tgt[i]) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 9]) kth = [0, 3, 19, 20] assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7)) assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7)) d = np.array([2, 1]) d.partition(0, kind=k) assert_raises(ValueError, d.partition, 2) assert_raises(np.AxisError, d.partition, 3, axis=1) assert_raises(ValueError, np.partition, d, 2) assert_raises(np.AxisError, np.partition, d, 2, axis=1) assert_raises(ValueError, d.argpartition, 2) assert_raises(np.AxisError, d.argpartition, 3, axis=1) assert_raises(ValueError, np.argpartition, d, 2) assert_raises(np.AxisError, np.argpartition, d, 2, axis=1) d = np.arange(10).reshape((2, 5)) d.partition(1, axis=0, kind=k) d.partition(4, axis=1, kind=k) np.partition(d, 1, axis=0, kind=k) np.partition(d, 4, axis=1, kind=k) np.partition(d, 1, axis=None, kind=k) np.partition(d, 9, axis=None, kind=k) d.argpartition(1, axis=0, kind=k) d.argpartition(4, axis=1, kind=k) np.argpartition(d, 1, axis=0, kind=k) np.argpartition(d, 4, axis=1, kind=k) np.argpartition(d, 1, axis=None, kind=k) np.argpartition(d, 9, axis=None, kind=k) assert_raises(ValueError, d.partition, 2, axis=0) assert_raises(ValueError, d.partition, 11, axis=1) assert_raises(TypeError, d.partition, 2, axis=None) assert_raises(ValueError, np.partition, d, 9, axis=1) assert_raises(ValueError, np.partition, d, 11, axis=None) assert_raises(ValueError, d.argpartition, 2, axis=0) assert_raises(ValueError, d.argpartition, 11, axis=1) assert_raises(ValueError, np.argpartition, d, 9, axis=1) assert_raises(ValueError, np.argpartition, d, 11, axis=None) td = [(dt, s) for dt in [np.int32, np.float32, np.complex64] for s in (9, 16)] for dt, s in td: aae = assert_array_equal at = assert_ d = np.arange(s, dtype=dt) np.random.shuffle(d) d1 = np.tile(np.arange(s, dtype=dt), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) for i in range(d.size): p = np.partition(d, i, kind=k) assert_equal(p[i], i) # all before are smaller assert_array_less(p[:i], p[i]) # all after are larger assert_array_less(p[i], p[i + 1:]) aae(p, d[np.argpartition(d, i, kind=k)]) p = np.partition(d1, i, axis=1, kind=k) aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:, :i].T <= p[:, i]).all(), msg="%d: 
%r <= %r" % (i, p[:, i], p[:, :i].T)) at((p[:, i + 1:].T > p[:, i]).all(), msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) aae(p, d1[np.arange(d1.shape[0])[:, None], np.argpartition(d1, i, axis=1, kind=k)]) p = np.partition(d0, i, axis=0, kind=k) aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:i, :] <= p[i, :]).all(), msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) at((p[i + 1:, :] > p[i, :]).all(), msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), np.arange(d0.shape[1])[None, :]]) # check inplace dc = d.copy() dc.partition(i, kind=k) assert_equal(dc, np.partition(d, i, kind=k)) dc = d0.copy() dc.partition(i, axis=0, kind=k) assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) dc = d1.copy() dc.partition(i, axis=1, kind=k) assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) def assert_partitioned(self, d, kth): prev = 0 for k in np.sort(kth): assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) assert_((d[k:] >= d[k]).all(), msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) prev = k + 1 def test_partition_iterative(self): d = np.arange(17) kth = (0, 1, 2, 429, 231) assert_raises(ValueError, d.partition, kth) assert_raises(ValueError, d.argpartition, kth) d = np.arange(10).reshape((2, 5)) assert_raises(ValueError, d.partition, kth, axis=0) assert_raises(ValueError, d.partition, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=None) d = np.array([3, 4, 2, 1]) p = np.partition(d, (0, 3)) self.assert_partitioned(p, (0, 3)) self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) assert_array_equal(p, np.partition(d, (-3, -1))) assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) d = np.arange(17) np.random.shuffle(d) d.partition(range(d.size)) assert_array_equal(np.arange(17), d) np.random.shuffle(d) assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) # test unsorted kth d = np.arange(17) np.random.shuffle(d) keys = np.array([1, 3, 8, -2]) np.random.shuffle(d) p = np.partition(d, keys) self.assert_partitioned(p, keys) p = d[np.argpartition(d, keys)] self.assert_partitioned(p, keys) np.random.shuffle(keys) assert_array_equal(np.partition(d, keys), p) assert_array_equal(d[np.argpartition(d, keys)], p) # equal kth d = np.arange(20)[::-1] self.assert_partitioned(np.partition(d, [5]*4), [5]) self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), [5]*4 + [6, 13]) self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], [5]*4 + [6, 13]) d = np.arange(12) np.random.shuffle(d) d1 = np.tile(np.arange(12), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) kth = (1, 6, 7, -1) p = np.partition(d1, kth, axis=1) pa = d1[np.arange(d1.shape[0])[:, None], d1.argpartition(kth, axis=1)] assert_array_equal(p, pa) for i in range(d1.shape[0]): self.assert_partitioned(p[i,:], kth) p = np.partition(d0, kth, axis=0) pa = d0[np.argpartition(d0, kth, axis=0), np.arange(d0.shape[1])[None,:]] assert_array_equal(p, pa) for i in range(d0.shape[1]): self.assert_partitioned(p[:, i], kth) def test_partition_cdtype(self): d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), ('Lancelot', 1.9, 38)], dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) tgt = np.sort(d, order=['age', 'height']) assert_array_equal(np.partition(d, range(d.size), order=['age', 'height']), tgt) assert_array_equal(d[np.argpartition(d, range(d.size), 
order=['age', 'height'])], tgt) for k in range(d.size): assert_equal(np.partition(d, k, order=['age', 'height'])[k], tgt[k]) assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k], tgt[k]) d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot']) tgt = np.sort(d) assert_array_equal(np.partition(d, range(d.size)), tgt) for k in range(d.size): assert_equal(np.partition(d, k)[k], tgt[k]) assert_equal(d[np.argpartition(d, k)][k], tgt[k]) def test_partition_unicode_kind(self): d = np.arange(10) k = b'\xc3\xa4'.decode("UTF8") assert_raises(ValueError, d.partition, 2, kind=k) assert_raises(ValueError, d.argpartition, 2, kind=k) def test_partition_fuzz(self): # a few rounds of random data testing for j in range(10, 30): for i in range(1, j - 2): d = np.arange(j) np.random.shuffle(d) d = d % np.random.randint(2, 30) idx = np.random.randint(d.size) kth = [0, idx, i, i + 1] tgt = np.sort(d)[kth] assert_array_equal(np.partition(d, kth)[kth], tgt, err_msg="data: %r\n kth: %r" % (d, kth)) def test_argpartition_gh5524(self): # A test for functionality of argpartition on lists. d = [6,7,3,2,9,0] p = np.argpartition(d,1) self.assert_partitioned(np.array(d)[p],[1]) def test_flatten(self): x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32) y0 = np.array([1, 2, 3, 4, 5, 6], np.int32) y0f = np.array([1, 4, 2, 5, 3, 6], np.int32) y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32) y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32) assert_equal(x0.flatten(), y0) assert_equal(x0.flatten('F'), y0f) assert_equal(x0.flatten('F'), x0.T.flatten()) assert_equal(x1.flatten(), y1) assert_equal(x1.flatten('F'), y1f) assert_equal(x1.flatten('F'), x1.T.flatten()) def test_dot(self): a = np.array([[1, 0], [0, 1]]) b = np.array([[0, 1], [1, 0]]) c = np.array([[9, 1], [1, -9]]) d = np.arange(24).reshape(4, 6) ddt = np.array( [[ 55, 145, 235, 325], [ 145, 451, 757, 1063], [ 235, 757, 1279, 1801], [ 325, 1063, 1801, 2539]] ) dtd = np.array( [[504, 540, 576, 612, 648, 684], [540, 580, 620, 660, 700, 740], [576, 620, 664, 708, 752, 796], [612, 660, 708, 756, 804, 852], [648, 700, 752, 804, 856, 908], [684, 740, 796, 852, 908, 964]] ) # gemm vs syrk optimizations for et in [np.float32, np.float64, np.complex64, np.complex128]: eaf = a.astype(et) assert_equal(np.dot(eaf, eaf), eaf) assert_equal(np.dot(eaf.T, eaf), eaf) assert_equal(np.dot(eaf, eaf.T), eaf) assert_equal(np.dot(eaf.T, eaf.T), eaf) assert_equal(np.dot(eaf.T.copy(), eaf), eaf) assert_equal(np.dot(eaf, eaf.T.copy()), eaf) assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf) # syrk validations for et in [np.float32, np.float64, np.complex64, np.complex128]: eaf = a.astype(et) ebf = b.astype(et) assert_equal(np.dot(ebf, ebf), eaf) assert_equal(np.dot(ebf.T, ebf), eaf) assert_equal(np.dot(ebf, ebf.T), eaf) assert_equal(np.dot(ebf.T, ebf.T), eaf) # syrk - different shape, stride, and view validations for et in [np.float32, np.float64, np.complex64, np.complex128]: edf = d.astype(et) assert_equal( np.dot(edf[::-1, :], edf.T), np.dot(edf[::-1, :].copy(), edf.T.copy()) ) assert_equal( np.dot(edf[:, ::-1], edf.T), np.dot(edf[:, ::-1].copy(), edf.T.copy()) ) assert_equal( np.dot(edf, edf[::-1, :].T), np.dot(edf, edf[::-1, :].T.copy()) ) assert_equal( np.dot(edf, edf[:, ::-1].T), np.dot(edf, edf[:, ::-1].T.copy()) ) assert_equal( np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T), np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy()) ) assert_equal( np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T), 
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy()) ) # syrk - different shape for et in [np.float32, np.float64, np.complex64, np.complex128]: edf = d.astype(et) eddtf = ddt.astype(et) edtdf = dtd.astype(et) assert_equal(np.dot(edf, edf.T), eddtf) assert_equal(np.dot(edf.T, edf), edtdf) # function versus methods assert_equal(np.dot(a, b), a.dot(b)) assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c)) # test passing in an output array c = np.zeros_like(a) a.dot(b, c) assert_equal(c, np.dot(a, b)) # test keyword args c = np.zeros_like(a) a.dot(b=b, out=c) assert_equal(c, np.dot(a, b)) def test_dot_type_mismatch(self): c = 1. A = np.array((1,1), dtype='i,i') assert_raises(TypeError, np.dot, c, A) assert_raises(TypeError, np.dot, A, c) def test_dot_out_mem_overlap(self): np.random.seed(1) # Test BLAS and non-BLAS code paths, including all dtypes # that dot() supports dtypes = [np.dtype(code) for code in np.typecodes['All'] if code not in 'USVM'] for dtype in dtypes: a = np.random.rand(3, 3).astype(dtype) # Valid dot() output arrays must be aligned b = _aligned_zeros((3, 3), dtype=dtype) b[...] = np.random.rand(3, 3) y = np.dot(a, b) x = np.dot(a, b, out=b) assert_equal(x, y, err_msg=repr(dtype)) # Check invalid output array assert_raises(ValueError, np.dot, a, b, out=b[::2]) assert_raises(ValueError, np.dot, a, b, out=b.T) def test_diagonal(self): a = np.arange(12).reshape((3, 4)) assert_equal(a.diagonal(), [0, 5, 10]) assert_equal(a.diagonal(0), [0, 5, 10]) assert_equal(a.diagonal(1), [1, 6, 11]) assert_equal(a.diagonal(-1), [4, 9]) b = np.arange(8).reshape((2, 2, 2)) assert_equal(b.diagonal(), [[0, 6], [1, 7]]) assert_equal(b.diagonal(0), [[0, 6], [1, 7]]) assert_equal(b.diagonal(1), [[2], [3]]) assert_equal(b.diagonal(-1), [[4], [5]]) assert_raises(ValueError, b.diagonal, axis1=0, axis2=0) assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]]) assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]]) assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]]) # Order of axis argument doesn't matter: assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]]) def test_diagonal_view_notwriteable(self): # this test is only for 1.9, the diagonal view will be # writeable in 1.10. a = np.eye(3).diagonal() assert_(not a.flags.writeable) assert_(not a.flags.owndata) a = np.diagonal(np.eye(3)) assert_(not a.flags.writeable) assert_(not a.flags.owndata) a = np.diag(np.eye(3)) assert_(not a.flags.writeable) assert_(not a.flags.owndata) def test_diagonal_memleak(self): # Regression test for a bug that crept in at one point a = np.zeros((100, 100)) if HAS_REFCOUNT: assert_(sys.getrefcount(a) < 50) for i in range(100): a.diagonal() if HAS_REFCOUNT: assert_(sys.getrefcount(a) < 50) def test_trace(self): a = np.arange(12).reshape((3, 4)) assert_equal(a.trace(), 15) assert_equal(a.trace(0), 15) assert_equal(a.trace(1), 18) assert_equal(a.trace(-1), 13) b = np.arange(8).reshape((2, 2, 2)) assert_equal(b.trace(), [6, 8]) assert_equal(b.trace(0), [6, 8]) assert_equal(b.trace(1), [2, 3]) assert_equal(b.trace(-1), [4, 5]) assert_equal(b.trace(0, 0, 1), [6, 8]) assert_equal(b.trace(0, 0, 2), [5, 9]) assert_equal(b.trace(0, 1, 2), [3, 11]) assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3]) def test_trace_subclass(self): # The class would need to overwrite trace to ensure single-element # output also has the right subclass. 
class MyArray(np.ndarray): pass b = np.arange(8).reshape((2, 2, 2)).view(MyArray) t = b.trace() assert_(isinstance(t, MyArray)) def test_put(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] for dt in icodes + fcodes + 'O': tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt) # test 1-d a = np.zeros(6, dtype=dt) a.put([1, 3, 5], [1, 3, 5]) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) a.put([1, 3, 5], [1, 3, 5]) assert_equal(a, tgt.reshape(2, 3)) for dt in '?': tgt = np.array([False, True, False, True, False, True], dtype=dt) # test 1-d a = np.zeros(6, dtype=dt) a.put([1, 3, 5], [True]*3) assert_equal(a, tgt) # test 2-d a = np.zeros((2, 3), dtype=dt) a.put([1, 3, 5], [True]*3) assert_equal(a, tgt.reshape(2, 3)) # check must be writeable a = np.zeros(6) a.flags.writeable = False assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5]) # when calling np.put, make sure a # TypeError is raised if the object # isn't an ndarray bad_array = [1, 2, 3] assert_raises(TypeError, np.put, bad_array, [0, 2], 5) def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) assert_(not a.ravel().flags.owndata) assert_equal(a.ravel('F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='C'), [0, 1, 2, 3]) assert_equal(a.ravel(order='F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='A'), [0, 1, 2, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_equal(a.ravel(order='K'), [0, 1, 2, 3]) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) a = np.array([[0, 1], [2, 3]], order='F') assert_equal(a.ravel(), [0, 1, 2, 3]) assert_equal(a.ravel(order='A'), [0, 2, 1, 3]) assert_equal(a.ravel(order='K'), [0, 2, 1, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) a = np.array([[0, 1], [2, 3]])[::-1, :] assert_equal(a.ravel(), [2, 3, 0, 1]) assert_equal(a.ravel(order='C'), [2, 3, 0, 1]) assert_equal(a.ravel(order='F'), [2, 0, 3, 1]) assert_equal(a.ravel(order='A'), [2, 3, 0, 1]) # 'K' doesn't reverse the axes of negative strides assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) assert_(a.ravel(order='K').flags.owndata) # Test simple 1-d copy behaviour: a = np.arange(10)[::2] assert_(a.ravel('K').flags.owndata) assert_(a.ravel('C').flags.owndata) assert_(a.ravel('F').flags.owndata) # Not contiguous and 1-sized axis with non matching stride a = np.arange(2**3 * 2)[::2] a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 a.strides = strides assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('K'), np.arange(0, 15, 2)) # contiguous and 1-sized axis with non matching stride works: a = np.arange(2**3) a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2) strides = list(a.strides) strides[1] = 123 a.strides = strides assert_(np.may_share_memory(a.ravel(order='K'), a)) assert_equal(a.ravel(order='K'), np.arange(2**3)) # Test negative strides (not very interesting since non-contiguous): a = np.arange(4)[::-1].reshape(2, 2) assert_(a.ravel(order='C').flags.owndata) assert_(a.ravel(order='K').flags.owndata) assert_equal(a.ravel('C'), [3, 2, 1, 0]) assert_equal(a.ravel('K'), [3, 2, 1, 0]) # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING): a = np.array([[1]]) a.strides = (123, 432) # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing # them up on purpose: if np.ones(1).strides == (8,): assert_(np.may_share_memory(a.ravel('K'), a)) 
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,)) for order in ('C', 'F', 'A', 'K'): # 0-d corner case: a = np.array(0) assert_equal(a.ravel(order), [0]) assert_(np.may_share_memory(a.ravel(order), a)) # Test that certain non-inplace ravels work right (mostly) for 'K': b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2) a = b[..., ::2] assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28]) assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28]) a = b[::2, ...] assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14]) assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14]) def test_ravel_subclass(self): class ArraySubclass(np.ndarray): pass a = np.arange(10).view(ArraySubclass) assert_(isinstance(a.ravel('C'), ArraySubclass)) assert_(isinstance(a.ravel('F'), ArraySubclass)) assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) a = np.arange(10)[::2].view(ArraySubclass) assert_(isinstance(a.ravel('C'), ArraySubclass)) assert_(isinstance(a.ravel('F'), ArraySubclass)) assert_(isinstance(a.ravel('A'), ArraySubclass)) assert_(isinstance(a.ravel('K'), ArraySubclass)) def test_swapaxes(self): a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy() idx = np.indices(a.shape) assert_(a.flags['OWNDATA']) b = a.copy() # check exceptions assert_raises(ValueError, a.swapaxes, -5, 0) assert_raises(ValueError, a.swapaxes, 4, 0) assert_raises(ValueError, a.swapaxes, 0, -5) assert_raises(ValueError, a.swapaxes, 0, 4) for i in range(-4, 4): for j in range(-4, 4): for k, src in enumerate((a, b)): c = src.swapaxes(i, j) # check shape shape = list(src.shape) shape[i] = src.shape[j] shape[j] = src.shape[i] assert_equal(c.shape, shape, str((i, j, k))) # check array contents i0, i1, i2, i3 = [dim-1 for dim in c.shape] j0, j1, j2, j3 = [dim-1 for dim in src.shape] assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]], c[idx[i0], idx[i1], idx[i2], idx[i3]], str((i, j, k))) # check a view is always returned, gh-5260 assert_(not c.flags['OWNDATA'], str((i, j, k))) # check on non-contiguous input array if k == 1: b = c def test_conjugate(self): a = np.array([1-1j, 1+1j, 23+23.0j]) ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1+1j, 23+23.0j], 'F') ac = a.conj() assert_equal(a.real, ac.real) assert_equal(a.imag, -ac.imag) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1, 2, 3]) ac = a.conj() assert_equal(a, ac) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1.0, 2.0, 3.0]) ac = a.conj() assert_equal(a, ac) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1+1j, 1, 2.0], object) ac = a.conj() assert_equal(ac, [k.conjugate() for k in a]) assert_equal(ac, a.conjugate()) assert_equal(ac, np.conjugate(a)) a = np.array([1-1j, 1, 2.0, 'f'], object) assert_raises(AttributeError, lambda: a.conj()) assert_raises(AttributeError, lambda: a.conjugate()) def test__complex__(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f', 'd', 'g', 'F', 'D', 'G', '?', 'O'] for dt in dtypes: a = np.array(7, dtype=dt) b = np.array([7], dtype=dt) c = np.array([[[[[7]]]]], dtype=dt) msg = 'dtype: {0}'.format(dt) ap = complex(a) assert_equal(ap, a, msg) bp 
= complex(b) assert_equal(bp, b, msg) cp = complex(c) assert_equal(cp, c, msg) def test__complex__should_not_work(self): dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f', 'd', 'g', 'F', 'D', 'G', '?', 'O'] for dt in dtypes: a = np.array([1, 2, 3], dtype=dt) assert_raises(TypeError, complex, a) dt = np.dtype([('a', 'f8'), ('b', 'i1')]) b = np.array((1.0, 3), dtype=dt) assert_raises(TypeError, complex, b) c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt) assert_raises(TypeError, complex, c) d = np.array('1+1j') assert_raises(TypeError, complex, d) e = np.array(['1+1j'], 'U') assert_raises(TypeError, complex, e) class TestBinop(object): def test_inplace(self): # test refcount 1 inplace conversion assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]), [0.5, 1.0]) d = np.array([0.5, 0.5])[::2] assert_array_almost_equal(d * (d * np.array([1.0, 2.0])), [0.25, 0.5]) a = np.array([0.5]) b = np.array([0.5]) c = a + b c = a - b c = a * b c = a / b assert_equal(a, b) assert_almost_equal(c, 1.) c = a + b * 2. / b * a - a / b assert_equal(a, b) assert_equal(c, 0.5) # true divide a = np.array([5]) b = np.array([3]) c = (a * a) / b assert_almost_equal(c, 25 / 3) assert_equal(a, 5) assert_equal(b, 3) # ndarray.__rop__ always calls ufunc # ndarray.__iop__ always calls ufunc # ndarray.__op__, __rop__: # - defer if other has __array_ufunc__ and it is None # or other is not a subclass and has higher array priority # - else, call ufunc def test_ufunc_binop_interaction(self): # Python method name (without underscores) # -> (numpy ufunc, has_in_place_version, preferred_dtype) ops = { 'add': (np.add, True, float), 'sub': (np.subtract, True, float), 'mul': (np.multiply, True, float), 'truediv': (np.true_divide, True, float), 'floordiv': (np.floor_divide, True, float), 'mod': (np.remainder, True, float), 'divmod': (np.divmod, False, float), 'pow': (np.power, True, int), 'lshift': (np.left_shift, True, int), 'rshift': (np.right_shift, True, int), 'and': (np.bitwise_and, True, int), 'xor': (np.bitwise_xor, True, int), 'or': (np.bitwise_or, True, int), # 'ge': (np.less_equal, False), # 'gt': (np.less, False), # 'le': (np.greater_equal, False), # 'lt': (np.greater, False), # 'eq': (np.equal, False), # 'ne': (np.not_equal, False), } class Coerced(Exception): pass def array_impl(self): raise Coerced def op_impl(self, other): return "forward" def rop_impl(self, other): return "reverse" def iop_impl(self, other): return "in-place" def array_ufunc_impl(self, ufunc, method, *args, **kwargs): return ("__array_ufunc__", ufunc, method, args, kwargs) # Create an object with the given base, in the given module, with a # bunch of placeholder __op__ methods, and optionally a # __array_ufunc__ and __array_priority__. def make_obj(base, array_priority=False, array_ufunc=False, alleged_module="__main__"): class_namespace = {"__array__": array_impl} if array_priority is not False: class_namespace["__array_priority__"] = array_priority for op in ops: class_namespace["__{0}__".format(op)] = op_impl class_namespace["__r{0}__".format(op)] = rop_impl class_namespace["__i{0}__".format(op)] = iop_impl if array_ufunc is not False: class_namespace["__array_ufunc__"] = array_ufunc eval_namespace = {"base": base, "class_namespace": class_namespace, "__name__": alleged_module, } MyType = eval("type('MyType', (base,), class_namespace)", eval_namespace) if issubclass(MyType, np.ndarray): # Use this range to avoid special case weirdnesses around # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. 
return np.arange(3, 5).view(MyType) else: return MyType() def check(obj, binop_override_expected, ufunc_override_expected, inplace_override_expected, check_scalar=True): for op, (ufunc, has_inplace, dtype) in ops.items(): err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' % (op, ufunc, has_inplace, dtype)) check_objs = [np.arange(3, 5, dtype=dtype)] if check_scalar: check_objs.append(check_objs[0][0]) for arr in check_objs: arr_method = getattr(arr, "__{0}__".format(op)) def first_out_arg(result): if op == "divmod": assert_(isinstance(result, tuple)) return result[0] else: return result # arr __op__ obj if binop_override_expected: assert_equal(arr_method(obj), NotImplemented, err_msg) elif ufunc_override_expected: assert_equal(arr_method(obj)[0], "__array_ufunc__", err_msg) else: if (isinstance(obj, np.ndarray) and (type(obj).__array_ufunc__ is np.ndarray.__array_ufunc__)): # __array__ gets ignored res = first_out_arg(arr_method(obj)) assert_(res.__class__ is obj.__class__, err_msg) else: assert_raises((TypeError, Coerced), arr_method, obj, err_msg=err_msg) # obj __op__ arr arr_rmethod = getattr(arr, "__r{0}__".format(op)) if ufunc_override_expected: res = arr_rmethod(obj) assert_equal(res[0], "__array_ufunc__", err_msg=err_msg) assert_equal(res[1], ufunc, err_msg=err_msg) else: if (isinstance(obj, np.ndarray) and (type(obj).__array_ufunc__ is np.ndarray.__array_ufunc__)): # __array__ gets ignored res = first_out_arg(arr_rmethod(obj)) assert_(res.__class__ is obj.__class__, err_msg) else: # __array_ufunc__ = "asdf" creates a TypeError assert_raises((TypeError, Coerced), arr_rmethod, obj, err_msg=err_msg) # arr __iop__ obj # array scalars don't have in-place operators if has_inplace and isinstance(arr, np.ndarray): arr_imethod = getattr(arr, "__i{0}__".format(op)) if inplace_override_expected: assert_equal(arr_method(obj), NotImplemented, err_msg=err_msg) elif ufunc_override_expected: res = arr_imethod(obj) assert_equal(res[0], "__array_ufunc__", err_msg) assert_equal(res[1], ufunc, err_msg) assert_(type(res[-1]["out"]) is tuple, err_msg) assert_(res[-1]["out"][0] is arr, err_msg) else: if (isinstance(obj, np.ndarray) and (type(obj).__array_ufunc__ is np.ndarray.__array_ufunc__)): # __array__ gets ignored assert_(arr_imethod(obj) is arr, err_msg) else: assert_raises((TypeError, Coerced), arr_imethod, obj, err_msg=err_msg) op_fn = getattr(operator, op, None) if op_fn is None: op_fn = getattr(operator, op + "_", None) if op_fn is None: op_fn = getattr(builtins, op) assert_equal(op_fn(obj, arr), "forward", err_msg) if not isinstance(obj, np.ndarray): if binop_override_expected: assert_equal(op_fn(arr, obj), "reverse", err_msg) elif ufunc_override_expected: assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", err_msg) if ufunc_override_expected: assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", err_msg) # No array priority, no array_ufunc -> nothing called check(make_obj(object), False, False, False) # Negative array priority, no array_ufunc -> nothing called # (has to be very negative, because scalar priority is -1000000.0) check(make_obj(object, array_priority=-2**30), False, False, False) # Positive array priority, no array_ufunc -> binops and iops only check(make_obj(object, array_priority=1), True, False, True) # ndarray ignores array_priority for ndarray subclasses check(make_obj(np.ndarray, array_priority=1), False, False, False, check_scalar=False) # Positive array_priority and array_ufunc -> array_ufunc only check(make_obj(object, array_priority=1, 
array_ufunc=array_ufunc_impl), False, True, False) check(make_obj(np.ndarray, array_priority=1, array_ufunc=array_ufunc_impl), False, True, False) # array_ufunc set to None -> defer binops only check(make_obj(object, array_ufunc=None), True, False, False) check(make_obj(np.ndarray, array_ufunc=None), True, False, False, check_scalar=False) def test_ufunc_override_normalize_signature(self): # gh-5674 class SomeClass(object): def __array_ufunc__(self, ufunc, method, *inputs, **kw): return kw a = SomeClass() kw = np.add(a, [1]) assert_('sig' not in kw and 'signature' not in kw) kw = np.add(a, [1], sig='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') kw = np.add(a, [1], signature='ii->i') assert_('sig' not in kw and 'signature' in kw) assert_equal(kw['signature'], 'ii->i') def test_array_ufunc_index(self): # Check that index is set appropriately, also if only an output # is passed on (latter is another regression tests for github bug 4753) # This also checks implicitly that 'out' is always a tuple. class CheckIndex(object): def __array_ufunc__(self, ufunc, method, *inputs, **kw): for i, a in enumerate(inputs): if a is self: return i # calls below mean we must be in an output. for j, a in enumerate(kw['out']): if a is self: return (j,) a = CheckIndex() dummy = np.arange(2.) # 1 input, 1 output assert_equal(np.sin(a), 0) assert_equal(np.sin(dummy, a), (0,)) assert_equal(np.sin(dummy, out=a), (0,)) assert_equal(np.sin(dummy, out=(a,)), (0,)) assert_equal(np.sin(a, a), 0) assert_equal(np.sin(a, out=a), 0) assert_equal(np.sin(a, out=(a,)), 0) # 1 input, 2 outputs assert_equal(np.modf(dummy, a), (0,)) assert_equal(np.modf(dummy, None, a), (1,)) assert_equal(np.modf(dummy, dummy, a), (1,)) assert_equal(np.modf(dummy, out=(a, None)), (0,)) assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) assert_equal(np.modf(dummy, out=(None, a)), (1,)) assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) assert_equal(np.modf(a, out=(dummy, a)), 0) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', DeprecationWarning) assert_equal(np.modf(dummy, out=a), (0,)) assert_(w[0].category is DeprecationWarning) assert_raises(ValueError, np.modf, dummy, out=(a,)) # 2 inputs, 1 output assert_equal(np.add(a, dummy), 0) assert_equal(np.add(dummy, a), 1) assert_equal(np.add(dummy, dummy, a), (0,)) assert_equal(np.add(dummy, a, a), 1) assert_equal(np.add(dummy, dummy, out=a), (0,)) assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) assert_equal(np.add(a, dummy, out=a), 0) def test_out_override(self): # regression test for github bug 4753 class OutClass(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kw): if 'out' in kw: tmp_kw = kw.copy() tmp_kw.pop('out') func = getattr(ufunc, method) kw['out'][0][...] = func(*inputs, **tmp_kw) A = np.array([0]).view(OutClass) B = np.array([5]) C = np.array([6]) np.multiply(C, B, A) assert_equal(A[0], 30) assert_(isinstance(A, OutClass)) A[0] = 0 np.multiply(C, B, out=A) assert_equal(A[0], 30) assert_(isinstance(A, OutClass)) def test_pow_override_with_errors(self): # regression test for gh-9112 class PowerOnly(np.ndarray): def __array_ufunc__(self, ufunc, method, *inputs, **kw): if ufunc is not np.power: raise NotImplementedError return "POWER!" # explicit cast to float, to ensure the fast power path is taken. 
a = np.array(5., dtype=np.float64).view(PowerOnly) assert_equal(a ** 2.5, "POWER!") with assert_raises(NotImplementedError): a ** 0.5 with assert_raises(NotImplementedError): a ** 0 with assert_raises(NotImplementedError): a ** 1 with assert_raises(NotImplementedError): a ** -1 with assert_raises(NotImplementedError): a ** 2 class TestTemporaryElide(object): # elision is only triggered on relatively large arrays def test_extension_incref_elide(self): # test extension (e.g. cython) calling PyNumber_* slots without # increasing the reference counts # # def incref_elide(a): # d = input.copy() # refcount 1 # return d, d + d # PyNumber_Add without increasing refcount from numpy.core.multiarray_tests import incref_elide d = np.ones(100000) orig, res = incref_elide(d) d + d # the return original should not be changed to an inplace operation assert_array_equal(orig, d) assert_array_equal(res, d + d) def test_extension_incref_elide_stack(self): # scanning if the refcount == 1 object is on the python stack to check # that we are called directly from python is flawed as object may still # be above the stack pointer and we have no access to the top of it # # def incref_elide_l(d): # return l[4] + l[4] # PyNumber_Add without increasing refcount from numpy.core.multiarray_tests import incref_elide_l # padding with 1 makes sure the object on the stack is not overwriten l = [1, 1, 1, 1, np.ones(100000)] res = incref_elide_l(l) # the return original should not be changed to an inplace operation assert_array_equal(l[4], np.ones(100000)) assert_array_equal(res, l[4] + l[4]) def test_temporary_with_cast(self): # check that we don't elide into a temporary which would need casting d = np.ones(200000, dtype=np.int64) assert_equal(((d + d) + 2**222).dtype, np.dtype('O')) r = ((d + d) / 2) assert_equal(r.dtype, np.dtype('f8')) r = np.true_divide((d + d), 2) assert_equal(r.dtype, np.dtype('f8')) r = ((d + d) / 2.) 
assert_equal(r.dtype, np.dtype('f8')) r = ((d + d) // 2) assert_equal(r.dtype, np.dtype(np.int64)) # commutative elision into the astype result f = np.ones(100000, dtype=np.float32) assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) # no elision into lower type d = f.astype(np.float64) assert_equal(((f + f) + d).dtype, d.dtype) l = np.ones(100000, dtype=np.longdouble) assert_equal(((d + d) + l).dtype, l.dtype) # test unary abs with different output dtype for dt in (np.complex64, np.complex128, np.clongdouble): c = np.ones(100000, dtype=dt) r = abs(c * 2.0) assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) def test_elide_broadcast(self): # test no elision on broadcast to higher dimension # only triggers elision code path in debug mode as triggering it in # normal mode needs 256kb large matching dimension, so a lot of memory d = np.ones((2000, 1), dtype=int) b = np.ones((2000), dtype=bool) r = (1 - d) + b assert_equal(r, 1) assert_equal(r.shape, (2000, 2000)) def test_elide_scalar(self): # check inplace op does not create ndarray from scalars a = np.bool_() assert_(type(~(a & a)) is np.bool_) def test_elide_readonly(self): # don't try to elide readonly temporaries r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 assert_equal(r, 0) def test_elide_updateifcopy(self): a = np.ones(2**20)[::2] b = a.flat.__array__() + 1 del b assert_equal(a, 1) class TestCAPI(object): def test_IsPythonScalar(self): from numpy.core.multiarray_tests import IsPythonScalar assert_(IsPythonScalar(b'foobar')) assert_(IsPythonScalar(1)) assert_(IsPythonScalar(2**80)) assert_(IsPythonScalar(2.)) assert_(IsPythonScalar("a")) class TestSubscripting(object): def test_test_zero_rank(self): x = np.array([1, 2, 3]) assert_(isinstance(x[0], np.int_)) if sys.version_info[0] < 3: assert_(isinstance(x[0], int)) assert_(type(x[0, ...]) is np.ndarray) class TestPickling(object): def test_roundtrip(self): import pickle carray = np.array([[2, 9], [7, 0], [3, 8]]) DATA = [ carray, np.transpose(carray), np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), ('c', float)]) ] for a in DATA: assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a) def _loads(self, obj): if sys.version_info[0] >= 3: return np.loads(obj, encoding='latin1') else: return np.loads(obj) # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field def test_version0_int8(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) def test_version0_float32(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32) p = self._loads(s) assert_equal(a, p) def test_version0_object(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' 
a = np.array([{'a': 1}, {'b': 2}]) p = self._loads(s) assert_equal(a, p) # version 1 pickles, using protocol=2 to pickle def test_version1_int8(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' a = np.array([1, 2, 3, 4], dtype=np.int8) p = self._loads(s) assert_equal(a, p) def test_version1_float32(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32) p = self._loads(s) assert_equal(a, p) def test_version1_object(self): s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' a = np.array([{'a': 1}, {'b': 2}]) p = self._loads(s) assert_equal(a, p) def test_subarray_int_shape(self): s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb." 
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)]) p = self._loads(s) assert_equal(a, p) class TestFancyIndexing(object): def test_list(self): x = np.ones((1, 1)) x[:, [0]] = 2.0 assert_array_equal(x, np.array([[2.0]])) x = np.ones((1, 1, 1)) x[:, :, [0]] = 2.0 assert_array_equal(x, np.array([[[2.0]]])) def test_tuple(self): x = np.ones((1, 1)) x[:, (0,)] = 2.0 assert_array_equal(x, np.array([[2.0]])) x = np.ones((1, 1, 1)) x[:, :, (0,)] = 2.0 assert_array_equal(x, np.array([[[2.0]]])) def test_mask(self): x = np.array([1, 2, 3, 4]) m = np.array([0, 1, 0, 0], bool) assert_array_equal(x[m], np.array([2])) def test_mask2(self): x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) m = np.array([0, 1], bool) m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool) m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool) assert_array_equal(x[m], np.array([[5, 6, 7, 8]])) assert_array_equal(x[m2], np.array([2, 5])) assert_array_equal(x[m3], np.array([2])) def test_assign_mask(self): x = np.array([1, 2, 3, 4]) m = np.array([0, 1, 0, 0], bool) x[m] = 5 assert_array_equal(x, np.array([1, 5, 3, 4])) def test_assign_mask2(self): xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) m = np.array([0, 1], bool) m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool) m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool) x = xorig.copy() x[m] = 10 assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]])) x = xorig.copy() x[m2] = 10 assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]])) x = xorig.copy() x[m3] = 10 assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]])) class TestStringCompare(object): def test_string(self): g1 = np.array(["This", "is", "example"]) g2 = np.array(["This", "was", "example"]) assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) def test_mixed(self): g1 = np.array(["spam", "spa", "spammer", "and eggs"]) g2 = "spam" assert_array_equal(g1 == g2, [x == g2 for x in g1]) assert_array_equal(g1 != g2, [x != g2 for x in g1]) assert_array_equal(g1 < g2, [x < g2 for x in g1]) assert_array_equal(g1 > g2, [x > g2 for x in g1]) assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) def test_unicode(self): g1 = np.array([u"This", u"is", u"example"]) g2 = np.array([u"This", u"was", u"example"]) assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) class TestArgmax(object): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), ([0, 1, 2, 3, complex(0, np.nan)], 4), ([0, 1, 2, 3, complex(np.nan, 0)], 4), ([0, 1, 2, complex(np.nan, 0), 3], 3), ([0, 1, 2, complex(0, np.nan), 3], 3), ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), 
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), ([complex(0, 0), complex(0, 2), complex(0, 1)], 1), ([complex(1, 0), complex(0, 2), complex(0, 1)], 0), ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), ([np.datetime64('1923-04-14T12:43:12'), np.datetime64('1994-06-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('1995-11-25T16:02:16'), np.datetime64('2005-01-04T03:14:12'), np.datetime64('2041-12-03T14:05:03')], 5), ([np.datetime64('1935-09-14T04:40:11'), np.datetime64('1949-10-12T12:32:11'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('2015-11-20T12:20:59'), np.datetime64('1932-09-23T10:10:13'), np.datetime64('2014-10-10T03:50:30')], 3), # Assorted tests with NaTs ([np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('NaT'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 4), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('NaT'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 0), ([np.timedelta64(2, 's'), np.timedelta64(1, 's'), np.timedelta64('NaT', 's'), np.timedelta64(3, 's')], 3), ([np.timedelta64('NaT', 's')] * 3, 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), timedelta(days=-1, seconds=23)], 0), ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), timedelta(days=5, seconds=14)], 1), ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), timedelta(days=10, seconds=43)], 2), ([False, False, False, False, True], 4), ([False, False, False, True, False], 3), ([True, False, False, False, False], 0), ([True, False, True, False, False], 0), ] def test_all(self): a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amax = a.max(i) aargmax = a.argmax(i) axes = list(range(a.ndim)) axes.remove(i) assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes)))) def test_combinations(self): for arr, pos in self.nan_arr: with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in reduce") max_val = np.max(arr) assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr) def test_output_shape(self): # see also gh-616 a = np.ones((10, 5)) # Check some simple shape mismatches out = np.ones(11, dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) out = np.ones((2, 5), dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 10), dtype=np.int_) assert_raises(ValueError, a.argmax, -1, out) out = np.ones(10, dtype=np.int_) a.argmax(-1, out=out) assert_equal(out, a.argmax(-1)) def test_argmax_unicode(self): d = np.zeros(6031, dtype='<U9') d[5942] = "as" assert_equal(d.argmax(), 5942) def test_np_vs_ndarray(self): # make sure both ndarray.argmax and numpy.argmax support out/axis args a = np.random.normal(size=(2,3)) # check positional args out1 = np.zeros(2, dtype=int) out2 = np.zeros(2, dtype=int) assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2)) assert_equal(out1, out2) # check keyword args out1 = np.zeros(3, dtype=int) out2 = np.zeros(3, dtype=int) assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0)) assert_equal(out1, out2) def test_object_argmax_with_NULLs(self): # See gh-6032 a = np.empty(4, dtype='O') ctypes.memset(a.ctypes.data, 0, a.nbytes) assert_equal(a.argmax(), 0) a[3] = 10 assert_equal(a.argmax(), 3) 
a[1] = 30 assert_equal(a.argmax(), 1) class TestArgmin(object): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), ([0, 1, 2, 3, complex(0, np.nan)], 4), ([0, 1, 2, 3, complex(np.nan, 0)], 4), ([0, 1, 2, complex(np.nan, 0), 3], 3), ([0, 1, 2, complex(0, np.nan), 3], 3), ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), ([complex(0, 0), complex(0, 2), complex(0, 1)], 0), ([complex(1, 0), complex(0, 2), complex(0, 1)], 2), ([complex(1, 0), complex(0, 2), complex(1, 1)], 1), ([np.datetime64('1923-04-14T12:43:12'), np.datetime64('1994-06-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('1995-11-25T16:02:16'), np.datetime64('2005-01-04T03:14:12'), np.datetime64('2041-12-03T14:05:03')], 0), ([np.datetime64('1935-09-14T04:40:11'), np.datetime64('1949-10-12T12:32:11'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('2014-11-20T12:20:59'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 5), # Assorted tests with NaTs ([np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('NaT'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 5), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('NaT'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 4), ([np.timedelta64(2, 's'), np.timedelta64(1, 's'), np.timedelta64('NaT', 's'), np.timedelta64(3, 's')], 1), ([np.timedelta64('NaT', 's')] * 3, 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), timedelta(days=-1, seconds=23)], 2), ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), timedelta(days=5, seconds=14)], 0), ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), timedelta(days=10, seconds=43)], 1), ([True, True, True, True, False], 4), ([True, True, True, False, True], 3), ([False, True, True, True, True], 0), ([False, True, False, True, True], 0), ] def test_all(self): a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amin = a.min(i) aargmin = a.argmin(i) axes = list(range(a.ndim)) axes.remove(i) assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes)))) def test_combinations(self): for arr, pos in self.nan_arr: with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in reduce") min_val = np.min(arr) assert_equal(np.argmin(arr), pos, err_msg="%r" % arr) assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr) def test_minimum_signed_integers(self): a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8) assert_equal(np.argmin(a), 1) a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16) assert_equal(np.argmin(a), 1) a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32) assert_equal(np.argmin(a), 1) a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64) assert_equal(np.argmin(a), 1) def test_output_shape(self): # see also gh-616 a = np.ones((10, 5)) # Check some simple shape mismatches out = np.ones(11, dtype=np.int_) assert_raises(ValueError, a.argmin, -1, out) out = np.ones((2, 5), dtype=np.int_) assert_raises(ValueError, a.argmin, -1, out) # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 
10), dtype=np.int_) assert_raises(ValueError, a.argmin, -1, out) out = np.ones(10, dtype=np.int_) a.argmin(-1, out=out) assert_equal(out, a.argmin(-1)) def test_argmin_unicode(self): d = np.ones(6031, dtype='<U9') d[6001] = "0" assert_equal(d.argmin(), 6001) def test_np_vs_ndarray(self): # make sure both ndarray.argmin and numpy.argmin support out/axis args a = np.random.normal(size=(2, 3)) # check positional args out1 = np.zeros(2, dtype=int) out2 = np.ones(2, dtype=int) assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2)) assert_equal(out1, out2) # check keyword args out1 = np.zeros(3, dtype=int) out2 = np.ones(3, dtype=int) assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0)) assert_equal(out1, out2) def test_object_argmin_with_NULLs(self): # See gh-6032 a = np.empty(4, dtype='O') ctypes.memset(a.ctypes.data, 0, a.nbytes) assert_equal(a.argmin(), 0) a[3] = 30 assert_equal(a.argmin(), 3) a[1] = 10 assert_equal(a.argmin(), 1) class TestMinMax(object): def test_scalar(self): assert_raises(np.AxisError, np.amax, 1, 1) assert_raises(np.AxisError, np.amin, 1, 1) assert_equal(np.amax(1, axis=0), 1) assert_equal(np.amin(1, axis=0), 1) assert_equal(np.amax(1, axis=None), 1) assert_equal(np.amin(1, axis=None), 1) def test_axis(self): assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000) assert_equal(np.amax([[1, 2, 3]], axis=1), 3) def test_datetime(self): # NaTs are ignored for dtype in ('m8[s]', 'm8[Y]'): a = np.arange(10).astype(dtype) a[3] = 'NaT' assert_equal(np.amin(a), a[0]) assert_equal(np.amax(a), a[9]) a[0] = 'NaT' assert_equal(np.amin(a), a[1]) assert_equal(np.amax(a), a[9]) a.fill('NaT') assert_equal(np.amin(a), a[0]) assert_equal(np.amax(a), a[0]) class TestNewaxis(object): def test_basic(self): sk = np.array([0, -0.1, 0.1]) res = 250*sk[:, np.newaxis] assert_almost_equal(res.ravel(), 250*sk) class TestClip(object): def _check_range(self, x, cmin, cmax): assert_(np.all(x >= cmin)) assert_(np.all(x <= cmax)) def _clip_type(self, type_group, array_max, clip_min, clip_max, inplace=False, expected_min=None, expected_max=None): if expected_min is None: expected_min = clip_min if expected_max is None: expected_max = clip_max for T in np.sctypes[type_group]: if sys.byteorder == 'little': byte_orders = ['=', '>'] else: byte_orders = ['<', '='] for byteorder in byte_orders: dtype = np.dtype(T).newbyteorder(byteorder) x = (np.random.random(1000) * array_max).astype(dtype) if inplace: x.clip(clip_min, clip_max, x) else: x = x.clip(clip_min, clip_max) byteorder = '=' if x.dtype.byteorder == '|': byteorder = '|' assert_equal(x.dtype.byteorder, byteorder) self._check_range(x, expected_min, expected_max) return x def test_basic(self): for inplace in [False, True]: self._clip_type( 'float', 1024, -12.8, 100.2, inplace=inplace) self._clip_type( 'float', 1024, 0, 0, inplace=inplace) self._clip_type( 'int', 1024, -120, 100.5, inplace=inplace) self._clip_type( 'int', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, 0, 0, inplace=inplace) self._clip_type( 'uint', 1024, -120, 100, inplace=inplace, expected_min=0) def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')]) y = rec['x'].clip(-0.3, 0.5) self._check_range(y, -0.3, 0.5) def test_max_or_min(self): val = np.array([0, 1, 2, 3, 4, 5, 6, 7]) x = val.clip(3) assert_(np.all(x >= 3)) x = val.clip(min=3) assert_(np.all(x >= 3)) x = val.clip(max=4) assert_(np.all(x <= 4)) def test_nan(self): input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) 
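        # NaNs are expected to propagate through clip unchanged; only finite entries are bounded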
result = input_arr.clip(-1, 1) expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) assert_array_equal(result, expected) class TestCompress(object): def test_axis(self): tgt = [[5, 6, 7, 8, 9]] arr = np.arange(10).reshape(2, 5) out = np.compress([0, 1], arr, axis=0) assert_equal(out, tgt) tgt = [[1, 3], [6, 8]] out = np.compress([0, 1, 0, 1, 0], arr, axis=1) assert_equal(out, tgt) def test_truncate(self): tgt = [[1], [6]] arr = np.arange(10).reshape(2, 5) out = np.compress([0, 1], arr, axis=1) assert_equal(out, tgt) def test_flatten(self): arr = np.arange(10).reshape(2, 5) out = np.compress([0, 1], arr) assert_equal(out, 1) class TestPutmask(object): def tst_basic(self, x, T, mask, val): np.putmask(x, mask, val) assert_equal(x[mask], T(val)) assert_equal(x.dtype, T) def test_ip_types(self): unchecked_types = [bytes, unicode, np.void, object] x = np.random.random(1000)*100 mask = x < 40 for val in [-100, 0, 15]: for types in np.sctypes.values(): for T in types: if T not in unchecked_types: yield self.tst_basic, x.copy().astype(T), T, mask, val def test_mask_size(self): assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) def tst_byteorder(self, dtype): x = np.array([1, 2, 3], dtype) np.putmask(x, [True, False, True], -1) assert_array_equal(x, [-1, 2, -1]) def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) np.putmask(rec['x'], [True, False], 10) assert_array_equal(rec['x'], [10, 5]) assert_array_equal(rec['y'], [2, 4]) assert_array_equal(rec['z'], [3, 3]) np.putmask(rec['y'], [True, False], 11) assert_array_equal(rec['x'], [10, 5]) assert_array_equal(rec['y'], [11, 4]) assert_array_equal(rec['z'], [3, 3]) class TestTake(object): def tst_basic(self, x): ind = list(range(x.shape[0])) assert_array_equal(x.take(ind, axis=0), x) def test_ip_types(self): unchecked_types = [bytes, unicode, np.void, object] x = np.random.random(24)*100 x.shape = 2, 3, 4 for types in np.sctypes.values(): for T in types: if T not in unchecked_types: yield self.tst_basic, x.copy().astype(T) def test_raise(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) def tst_byteorder(self, dtype): x = np.array([1, 2, 3], dtype) assert_array_equal(x.take([0, 2, 1]), [1, 3, 2]) def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. 
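        # putmask writing through a field view must respect each field's byte order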
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) rec1 = rec.take([1]) assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0) class TestLexsort(object): def test_basic(self): a = [1, 2, 1, 3, 1, 5] b = [0, 4, 5, 6, 2, 3] idx = np.lexsort((b, a)) expected_idx = np.array([0, 4, 2, 1, 3, 5]) assert_array_equal(idx, expected_idx) x = np.vstack((b, a)) idx = np.lexsort(x) assert_array_equal(idx, expected_idx) assert_array_equal(x[1][idx], np.sort(x[1])) def test_datetime(self): a = np.array([0,0,0], dtype='datetime64[D]') b = np.array([2,1,0], dtype='datetime64[D]') idx = np.lexsort((b, a)) expected_idx = np.array([2, 1, 0]) assert_array_equal(idx, expected_idx) a = np.array([0,0,0], dtype='timedelta64[D]') b = np.array([2,1,0], dtype='timedelta64[D]') idx = np.lexsort((b, a)) expected_idx = np.array([2, 1, 0]) assert_array_equal(idx, expected_idx) def test_object(self): # gh-6312 a = np.random.choice(10, 1000) b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000) for u in a, b: left = np.lexsort((u.astype('O'),)) right = np.argsort(u, kind='mergesort') assert_array_equal(left, right) for u, v in (a, b), (b, a): idx = np.lexsort((u, v)) assert_array_equal(idx, np.lexsort((u.astype('O'), v))) assert_array_equal(idx, np.lexsort((u, v.astype('O')))) u, v = np.array(u, dtype='object'), np.array(v, dtype='object') assert_array_equal(idx, np.lexsort((u, v))) def test_invalid_axis(self): # gh-7528 x = np.linspace(0., 1., 42*3).reshape(42, 3) assert_raises(np.AxisError, np.lexsort, x, axis=2) class TestIO(object): """Test tofile, fromfile, tobytes, and fromstring""" def setup(self): shape = (2, 4, 3) rand = np.random.random self.x = rand(shape) + rand(shape).astype(complex)*1j self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan] self.dtype = self.x.dtype self.tempdir = tempfile.mkdtemp() self.filename = tempfile.mktemp(dir=self.tempdir) def teardown(self): shutil.rmtree(self.tempdir) def test_nofile(self): # this should probably be supported as a file # but for now test for proper errors b = io.BytesIO() assert_raises(IOError, np.fromfile, b, np.uint8, 80) d = np.ones(7) assert_raises(IOError, lambda x: x.tofile(b), d) def test_bool_fromstring(self): v = np.array([True, False, True, False], dtype=np.bool_) y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_) assert_array_equal(v, y) def test_uint64_fromstring(self): d = np.fromstring("9923372036854775807 104783749223640", dtype=np.uint64, sep=' ') e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64) assert_array_equal(d, e) def test_int64_fromstring(self): d = np.fromstring("-25041670086757 104783749223640", dtype=np.int64, sep=' ') e = np.array([-25041670086757, 104783749223640], dtype=np.int64) assert_array_equal(d, e) def test_empty_files_binary(self): f = open(self.filename, 'w') f.close() y = np.fromfile(self.filename) assert_(y.size == 0, "Array not empty") def test_empty_files_text(self): f = open(self.filename, 'w') f.close() y = np.fromfile(self.filename, sep=" ") assert_(y.size == 0, "Array not empty") def test_roundtrip_file(self): f = open(self.filename, 'wb') self.x.tofile(f) f.close() # NB. 
doesn't work with flush+seek, due to use of C stdio f = open(self.filename, 'rb') y = np.fromfile(f, dtype=self.dtype) f.close() assert_array_equal(y, self.x.flat) def test_roundtrip_filename(self): self.x.tofile(self.filename) y = np.fromfile(self.filename, dtype=self.dtype) assert_array_equal(y, self.x.flat) def test_roundtrip_binary_str(self): s = self.x.tobytes() y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flat) s = self.x.tobytes('F') y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flatten('F')) def test_roundtrip_str(self): x = self.x.real.ravel() s = "@".join(map(str, x)) y = np.fromstring(s, sep="@") # NB. str imbues less precision nan_mask = ~np.isfinite(x) assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) def test_roundtrip_repr(self): x = self.x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) def test_unseekable_fromfile(self): # gh-6246 self.x.tofile(self.filename) def fail(*args, **kwargs): raise IOError('Can not tell or seek') with io.open(self.filename, 'rb', buffering=0) as f: f.seek = fail f.tell = fail assert_raises(IOError, np.fromfile, f, dtype=self.dtype) def test_io_open_unbuffered_fromfile(self): # gh-6632 self.x.tofile(self.filename) with io.open(self.filename, 'rb', buffering=0) as f: y = np.fromfile(f, dtype=self.dtype) assert_array_equal(y, self.x.flat) def test_largish_file(self): # check the fallocate path on files > 16MB d = np.zeros(4 * 1024 ** 2) d.tofile(self.filename) assert_equal(os.path.getsize(self.filename), d.nbytes) assert_array_equal(d, np.fromfile(self.filename)) # check offset with open(self.filename, "r+b") as f: f.seek(d.nbytes) d.tofile(f) assert_equal(os.path.getsize(self.filename), d.nbytes * 2) # check append mode (gh-8329) open(self.filename, "w").close() # delete file contents with open(self.filename, "ab") as f: d.tofile(f) assert_array_equal(d, np.fromfile(self.filename)) with open(self.filename, "ab") as f: d.tofile(f) assert_equal(os.path.getsize(self.filename), d.nbytes * 2) def test_io_open_buffered_fromfile(self): # gh-6632 self.x.tofile(self.filename) with io.open(self.filename, 'rb', buffering=-1) as f: y = np.fromfile(f, dtype=self.dtype) assert_array_equal(y, self.x.flat) def test_file_position_after_fromfile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.close() for mode in ['rb', 'r+b']: err_msg = "%d %s" % (size, mode) f = open(self.filename, mode) f.read(2) np.fromfile(f, dtype=np.float64, count=1) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def test_file_position_after_tofile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: err_msg = "%d" % (size,) f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.seek(10) f.write(b'12') np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) f = open(self.filename, 'r+b') f.read(2) f.seek(0, 1) # seek between read&write required by ANSI C np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def _check_from(self, s, value, **kw): y = np.fromstring(s, **kw) assert_array_equal(y, value) f = open(self.filename, 'wb') f.write(s) f.close() y = np.fromfile(self.filename, **kw) assert_array_equal(y, 
value) def test_nan(self): self._check_from( b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], sep=' ') def test_inf(self): self._check_from( b"inf +inf -inf infinity -Infinity iNfInItY -inF", [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], sep=' ') def test_numbers(self): self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') def test_binary(self): self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', np.array([1, 2, 3, 4]), dtype='<f4') @dec.slow # takes > 1 minute on mechanical hard drive def test_big_binary(self): """Test workarounds for 32-bit limited fwrite, fseek, and ftell calls in windows. These normally would hang doing something like this. See http://projects.scipy.org/numpy/ticket/1660""" if sys.platform != 'win32': return try: # before workarounds, only up to 2**32-1 worked fourgbplus = 2**32 + 2**16 testbytes = np.arange(8, dtype=np.int8) n = len(testbytes) flike = tempfile.NamedTemporaryFile() f = flike.file np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) flike.seek(0) a = np.fromfile(f, dtype=np.int8) flike.close() assert_(len(a) == fourgbplus) # check only start and end for speed: assert_((a[:n] == testbytes).all()) assert_((a[-n:] == testbytes).all()) except (MemoryError, ValueError): pass def test_string(self): self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',') def test_counted_string(self): self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',') self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') def test_string_with_ws(self): self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') def test_counted_string_with_ws(self): self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int, sep=' ') def test_ascii(self): self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') def test_malformed(self): self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ') def test_long_sep(self): self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') def test_dtype(self): v = np.array([1, 2, 3, 4], dtype=np.int_) self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_) def test_dtype_bool(self): # can't use _check_from because fromstring can't handle True/False v = np.array([True, False, True, False], dtype=np.bool_) s = b'1,0,-2.3,0' f = open(self.filename, 'wb') f.write(s) f.close() y = np.fromfile(self.filename, sep=',', dtype=np.bool_) assert_(y.dtype == '?') assert_array_equal(y, v) def test_tofile_sep(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',') f.close() f = open(self.filename, 'r') s = f.read() f.close() #assert_equal(s, '1.51,2.0,3.51,4.0') y = np.array([float(p) for p in s.split(',')]) assert_array_equal(x,y) def test_tofile_format(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',', format='%.2f') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.00,3.51,4.00') def test_locale(self): in_foreign_locale(self.test_numbers)() in_foreign_locale(self.test_nan)() in_foreign_locale(self.test_inf)() in_foreign_locale(self.test_counted_string)() in_foreign_locale(self.test_ascii)() in_foreign_locale(self.test_malformed)() in_foreign_locale(self.test_tofile_sep)() in_foreign_locale(self.test_tofile_format)() class 
TestFromBuffer(object): def tst_basic(self, buffer, expected, kwargs): assert_array_equal(np.frombuffer(buffer,**kwargs), expected) def test_ip_basic(self): for byteorder in ['<', '>']: for dtype in [float, int, complex]: dt = np.dtype(dtype).newbyteorder(byteorder) x = (np.random.random((4, 7))*5).astype(dt) buf = x.tobytes() yield self.tst_basic, buf, x.flat, {'dtype':dt} def test_empty(self): yield self.tst_basic, b'', np.array([]), {} class TestFlat(object): def setup(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) a0.shape = (4, 5) a.flags.writeable = False self.a = a self.b = a[::2, ::2] self.a0 = a0 self.b0 = a0[::2, ::2] def test_contiguous(self): testpassed = False try: self.a.flat[12] = 100.0 except ValueError: testpassed = True assert_(testpassed) assert_(self.a.flat[12] == 12.0) def test_discontiguous(self): testpassed = False try: self.b.flat[4] = 100.0 except ValueError: testpassed = True assert_(testpassed) assert_(self.b.flat[4] == 12.0) def test___array__(self): c = self.a.flat.__array__() d = self.b.flat.__array__() e = self.a0.flat.__array__() f = self.b0.flat.__array__() assert_(c.flags.writeable is False) assert_(d.flags.writeable is False) assert_(e.flags.writeable is True) assert_(f.flags.writeable is True) assert_(c.flags.updateifcopy is False) assert_(d.flags.updateifcopy is False) assert_(e.flags.updateifcopy is False) assert_(f.flags.updateifcopy is True) assert_(f.base is self.b0) class TestResize(object): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) if IS_PYPY: x.resize((5, 5), refcheck=False) else: x.resize((5, 5)) assert_array_equal(x.flat[:9], np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) assert_array_equal(x[9:].flat, 0) def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x assert_raises(ValueError, x.resize, (5, 1)) del y # avoid pyflakes unused variable warning. 
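    # ndarray.resize() refuses to reallocate while another name still references
    # the buffer (the ValueError checked above); refcheck=False, as used in the
    # IS_PYPY branches, skips that check but leaves any surviving reference
    # pointing at potentially stale memory, e.g.:
    #     y = x
    #     x.resize((5, 1), refcheck=False)  # proceeds, but y is no longer safe to use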
def test_int_shape(self): x = np.eye(3) if IS_PYPY: x.resize(3, refcheck=False) else: x.resize(3) assert_array_equal(x, np.eye(3)[0,:]) def test_none_shape(self): x = np.eye(3) x.resize(None) assert_array_equal(x, np.eye(3)) x.resize() assert_array_equal(x, np.eye(3)) def test_0d_shape(self): # to it multiple times to test it does not break alloc cache gh-9216 for i in range(10): x = np.empty((1,)) x.resize(()) assert_equal(x.shape, ()) assert_equal(x.size, 1) x = np.empty(()) x.resize((1,)) assert_equal(x.shape, (1,)) assert_equal(x.size, 1) def test_invalid_arguments(self): assert_raises(TypeError, np.eye(3).resize, 'hi') assert_raises(ValueError, np.eye(3).resize, -1) assert_raises(TypeError, np.eye(3).resize, order=1) assert_raises(TypeError, np.eye(3).resize, refcheck='hi') def test_freeform_shape(self): x = np.eye(3) if IS_PYPY: x.resize(3, 2, 1, refcheck=False) else: x.resize(3, 2, 1) assert_(x.shape == (3, 2, 1)) def test_zeros_appended(self): x = np.eye(3) if IS_PYPY: x.resize(2, 3, 3, refcheck=False) else: x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) def test_obj_obj(self): # check memory is initialized on resize, gh-4857 a = np.ones(10, dtype=[('k', object, 2)]) if IS_PYPY: a.resize(15, refcheck=False) else: a.resize(15,) assert_equal(a.shape, (15,)) assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) class TestRecord(object): def test_field_rename(self): dt = np.dtype([('f', float), ('i', int)]) dt.names = ['p', 'q'] assert_equal(dt.names, ['p', 'q']) def test_multiple_field_name_occurrence(self): def test_assign(): dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) # Error raised when multiple fields have the same name assert_raises(ValueError, test_assign) if sys.version_info[0] >= 3: def test_bytes_fields(self): # Bytes are not allowed in field names and not recognized in titles # on Py3 assert_raises(TypeError, np.dtype, [(b'a', int)]) assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) dt = np.dtype([((b'a', 'b'), int)]) assert_raises(ValueError, dt.__getitem__, b'a') x = np.array([(1,), (2,), (3,)], dtype=dt) assert_raises(IndexError, x.__getitem__, b'a') y = x[0] assert_raises(IndexError, y.__getitem__, b'a') def test_multiple_field_name_unicode(self): def test_assign_unicode(): dt = np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) # Error raised when multiple fields have the same name(unicode included) assert_raises(ValueError, test_assign_unicode) else: def test_unicode_field_titles(self): # Unicode field titles are added to field dict on Py2 title = u'b' dt = np.dtype([((title, 'a'), int)]) dt[title] dt['a'] x = np.array([(1,), (2,), (3,)], dtype=dt) x[title] x['a'] y = x[0] y[title] y['a'] def test_unicode_field_names(self): # Unicode field names are not allowed on Py2 title = u'b' assert_raises(TypeError, np.dtype, [(title, int)]) assert_raises(TypeError, np.dtype, [(('a', title), int)]) def test_field_names(self): # Test unicode and 8-bit / byte strings can be used a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) is_py3 = sys.version_info[0] >= 3 if is_py3: funcs = (str,) # byte string indexing fails gracefully assert_raises(IndexError, a.__setitem__, b'f1', 1) assert_raises(IndexError, a.__getitem__, b'f1') assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1) assert_raises(IndexError, a['f1'].__getitem__, b'sf1') else: funcs = (str, unicode) for func in funcs: b = a.copy() fn1 = func('f1') b[fn1] = 1 assert_equal(b[fn1], 1) fnn = 
func('not at all') assert_raises(ValueError, b.__setitem__, fnn, 1) assert_raises(ValueError, b.__getitem__, fnn) b[0][fn1] = 2 assert_equal(b[fn1], 2) # Subfield assert_raises(ValueError, b[0].__setitem__, fnn, 1) assert_raises(ValueError, b[0].__getitem__, fnn) # Subfield fn3 = func('f3') sfn1 = func('sf1') b[fn3][sfn1] = 1 assert_equal(b[fn3][sfn1], 1) assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) assert_raises(ValueError, b[fn3].__getitem__, fnn) # multiple subfields fn2 = func('f2') b[fn2] = 3 with suppress_warnings() as sup: sup.filter(FutureWarning, "Assignment between structured arrays.*") sup.filter(FutureWarning, "Numpy has detected that you .*") assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) # view of subfield view/copy assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3)) assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2)) view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])] assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,))) # non-ascii unicode field indexing is well behaved if not is_py3: raise SkipTest('non ascii unicode field indexing skipped; ' 'raises segfault on python 2.x') else: assert_raises(ValueError, a.__setitem__, u'\u03e0', 1) assert_raises(ValueError, a.__getitem__, u'\u03e0') def test_field_names_deprecation(self): def collect_warnings(f, *args, **kwargs): with warnings.catch_warnings(record=True) as log: warnings.simplefilter("always") f(*args, **kwargs) return [w.category for w in log] a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) a['f1'][0] = 1 a['f2'][0] = 2 a['f3'][0] = (3,) b = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) b['f1'][0] = 1 b['f2'][0] = 2 b['f3'][0] = (3,) # All the different functions raise a warning, but not an error assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)), [FutureWarning]) # For <=1.12 a is not modified, but it will be in 1.13 assert_equal(a, b) # Views also warn subset = a[['f1', 'f2']] subset_view = subset.view() assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10), [FutureWarning]) # But the write goes through: assert_equal(subset['f1'][0], 10) # Only one warning per multiple field indexing, though (even if there # are multiple views involved): assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), []) # make sure views of a multi-field index warn too c = np.zeros(3, dtype='i8,i8,i8') assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'), [FutureWarning]) # make sure assignment using a different dtype warns a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')]) b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')]) assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning]) def test_record_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') a.flags.writeable = False b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) b.flags.writeable = False c = np.array([(1, 2), (3, 4)], dtype='i1,i2') c.flags.writeable = False assert_(hash(a[0]) == hash(a[1])) assert_(hash(a[0]) == hash(b[0])) assert_(hash(a[0]) != hash(b[1])) assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) def test_record_no_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') assert_raises(TypeError, hash, a[0]) def test_empty_structure_creation(self): # make sure these do not raise errors (gh-5631) np.array([()], dtype={'names': [], 'formats': [], 'offsets': [], 
'itemsize': 12}) np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], 'offsets': [], 'itemsize': 12}) class TestView(object): def test_basic(self): x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), ('b', np.int8), ('a', np.int8)]) # We must be specific about the endianness here: y = x.view(dtype='<i4') # ... and again without the keyword. z = x.view('<i4') assert_array_equal(y, z) assert_array_equal(y, [67305985, 134678021]) def _mean(a, **args): return a.mean(**args) def _var(a, **args): return a.var(**args) def _std(a, **args): return a.std(**args) class TestStats(object): funcs = [_mean, _var, _std] def setup(self): np.random.seed(range(3)) self.rmat = np.random.random((4, 5)) self.cmat = self.rmat + 1j * self.rmat self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat]) self.omat = self.omat.reshape(4, 5) def test_python_type(self): for x in (np.float16(1.), 1, 1., 1+0j): assert_equal(np.mean([x]), 1.) assert_equal(np.std([x]), 0.) assert_equal(np.var([x]), 0.) def test_keepdims(self): mat = np.eye(3) for f in self.funcs: for axis in [0, 1]: res = f(mat, axis=axis, keepdims=True) assert_(res.ndim == mat.ndim) assert_(res.shape[axis] == 1) for axis in [None]: res = f(mat, axis=axis, keepdims=True) assert_(res.shape == (1, 1)) def test_out(self): mat = np.eye(3) for f in self.funcs: out = np.zeros(3) tgt = f(mat, axis=1) res = f(mat, axis=1, out=out) assert_almost_equal(res, out) assert_almost_equal(res, tgt) out = np.empty(2) assert_raises(ValueError, f, mat, axis=1, out=out) out = np.empty((2, 2)) assert_raises(ValueError, f, mat, axis=1, out=out) def test_dtype_from_input(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] # object type for f in self.funcs: mat = np.array([[Decimal(1)]*3]*3) tgt = mat.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = type(f(mat, axis=None)) assert_(res is Decimal) # integer types for f in self.funcs: for c in icodes: mat = np.eye(3, dtype=c) tgt = np.float64 res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) # mean for float types for f in [_mean]: for c in fcodes: mat = np.eye(3, dtype=c) tgt = mat.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) # var, std for float types for f in [_var, _std]: for c in fcodes: mat = np.eye(3, dtype=c) # deal with complex types tgt = mat.real.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) def test_dtype_from_dtype(self): mat = np.eye(3) # stats for integer types # FIXME: # this needs definition as there are lots places along the line # where type casting may take place. 
# for f in self.funcs: # for c in np.typecodes['AllInteger']: # tgt = np.dtype(c).type # res = f(mat, axis=1, dtype=c).dtype.type # assert_(res is tgt) # # scalar case # res = f(mat, axis=None, dtype=c).dtype.type # assert_(res is tgt) # stats for float types for f in self.funcs: for c in np.typecodes['AllFloat']: tgt = np.dtype(c).type res = f(mat, axis=1, dtype=c).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None, dtype=c).dtype.type assert_(res is tgt) def test_ddof(self): for f in [_var]: for ddof in range(3): dim = self.rmat.shape[1] tgt = f(self.rmat, axis=1) * dim res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof) for f in [_std]: for ddof in range(3): dim = self.rmat.shape[1] tgt = f(self.rmat, axis=1) * np.sqrt(dim) res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) assert_almost_equal(res, tgt) assert_almost_equal(res, tgt) def test_ddof_too_big(self): dim = self.rmat.shape[1] for f in [_var, _std]: for ddof in range(dim, dim + 2): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(self.rmat, axis=1, ddof=ddof) assert_(not (res < 0).any()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): A = np.zeros((0, 3)) for f in self.funcs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(A, axis=axis)).all()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] assert_almost_equal(res, tgt) for axis in [None]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * np.prod(mat.shape) assert_almost_equal(res, tgt) def test_mean_float16(self): # This fail if the sum inside mean is done in float16 instead # of float32. 
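        # (a float16 running sum would stall near 2048, giving a mean far below 1)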
assert_(_mean(np.ones(100000, dtype='float16')) == 1) def test_var_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() res = _var(mat, axis=axis) assert_almost_equal(res, tgt) def test_std_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) assert_almost_equal(res, tgt) def test_subclass(self): class TestArray(np.ndarray): def __new__(cls, data, info): result = np.array(data) result = result.view(cls) result.info = info return result def __array_finalize__(self, obj): self.info = getattr(obj, "info", '') dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') res = dat.mean(1) assert_(res.info == dat.info) res = dat.std(1) assert_(res.info == dat.info) res = dat.var(1) assert_(res.info == dat.info) class TestVdot(object): def test_basic(self): dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] dt_complex = np.typecodes['Complex'] # test real a = np.eye(3) for dt in dt_numeric + 'O': b = a.astype(dt) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), 3) # test complex a = np.eye(3) * 1j for dt in dt_complex + 'O': b = a.astype(dt) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), 3) # test boolean b = np.eye(3, dtype=bool) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), True) def test_vdot_array_order(self): a = np.array([[1, 2], [3, 4]], order='C') b = np.array([[1, 2], [3, 4]], order='F') res = np.vdot(a, a) # integer arrays are exact assert_equal(np.vdot(a, b), res) assert_equal(np.vdot(b, a), res) assert_equal(np.vdot(b, b), res) def test_vdot_uncontiguous(self): for size in [2, 1000]: # Different sizes match different branches in vdot. 
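            # slicing away the last axis below makes a and b deliberately non-contiguous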
a = np.zeros((size, 2, 2)) b = np.zeros((size, 2, 2)) a[:, 0, 0] = np.arange(size) b[:, 0, 0] = np.arange(size) + 1 # Make a and b uncontiguous: a = a[..., 0] b = b[..., 0] assert_equal(np.vdot(a, b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a, b.copy()), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a.copy(), b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a.copy('F'), b), np.vdot(a.flatten(), b.flatten())) assert_equal(np.vdot(a, b.copy('F')), np.vdot(a.flatten(), b.flatten())) class TestDot(object): def setup(self): np.random.seed(128) self.A = np.random.rand(4, 2) self.b1 = np.random.rand(2, 1) self.b2 = np.random.rand(2) self.b3 = np.random.rand(1, 2) self.b4 = np.random.rand(4) self.N = 7 def test_dotmatmat(self): A = self.A res = np.dot(A.transpose(), A) tgt = np.array([[1.45046013, 0.86323640], [0.86323640, 0.84934569]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec(self): A, b1 = self.A, self.b1 res = np.dot(A, b1) tgt = np.array([[0.32114320], [0.04889721], [0.15696029], [0.33612621]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotmatvec2(self): A, b2 = self.A, self.b2 res = np.dot(A, b2) tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat(self): A, b4 = self.A, self.b4 res = np.dot(b4, A) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat2(self): b3, A = self.b3, self.A res = np.dot(b3, A.transpose()) tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecmat3(self): A, b4 = self.A, self.b4 res = np.dot(A.transpose(), b4) tgt = np.array([1.23495091, 1.12222648]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecouter(self): b1, b3 = self.b1, self.b3 res = np.dot(b1, b3) tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecvecinner(self): b1, b3 = self.b1, self.b3 res = np.dot(b3, b1) tgt = np.array([[ 0.23129668]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect1(self): b1 = np.ones((3, 1)) b2 = [5.3] res = np.dot(b1, b2) tgt = np.array([5.3, 5.3, 5.3]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotcolumnvect2(self): b1 = np.ones((3, 1)).transpose() b2 = [6.2] res = np.dot(b2, b1) tgt = np.array([6.2, 6.2, 6.2]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar(self): np.random.seed(100) b1 = np.random.rand(1, 1) b2 = np.random.rand(1, 4) res = np.dot(b1, b2) tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) assert_almost_equal(res, tgt, decimal=self.N) def test_dotvecscalar2(self): np.random.seed(100) b1 = np.random.rand(4, 1) b2 = np.random.rand(1, 1) res = np.dot(b1, b2) tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]]) assert_almost_equal(res, tgt, decimal=self.N) def test_all(self): dims = [(), (1,), (1, 1)] dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): b1 = np.zeros(dim1) b2 = np.zeros(dim2) res = np.dot(b1, b2) tgt = np.zeros(dim) assert_(res.shape == tgt.shape) assert_almost_equal(res, tgt, decimal=self.N) def test_vecobject(self): class Vec(object): def __init__(self, sequence=None): if sequence is None: sequence = [] self.array = np.array(sequence) def __add__(self, other): out = Vec() out.array = self.array + other.array return out def 
__sub__(self, other): out = Vec() out.array = self.array - other.array return out def __mul__(self, other): # with scalar out = Vec(self.array.copy()) out.array *= other return out def __rmul__(self, other): return self*other U_non_cont = np.transpose([[1., 1.], [1., 2.]]) U_cont = np.ascontiguousarray(U_non_cont) x = np.array([Vec([1., 0.]), Vec([0., 1.])]) zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) assert_equal(zeros[0].array, zeros_test[0].array) assert_equal(zeros[1].array, zeros_test[1].array) def test_dot_2args(self): from numpy.core.multiarray import dot a = np.array([[1, 2], [3, 4]], dtype=float) b = np.array([[1, 0], [1, 1]], dtype=float) c = np.array([[3, 2], [7, 4]], dtype=float) d = dot(a, b) assert_allclose(c, d) def test_dot_3args(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 32)) for i in range(12): dot(f, v, r) if HAS_REFCOUNT: assert_equal(sys.getrefcount(r), 2) r2 = dot(f, v, out=None) assert_array_equal(r2, r) assert_(r is dot(f, v, out=r)) v = v[:, 0].copy() # v.shape == (16,) r = r[:, 0].copy() # r.shape == (1024,) r2 = dot(f, v) assert_(r is dot(f, v, r)) assert_array_equal(r2, r) def test_dot_3args_errors(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 31)) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32, 1024)) assert_raises(ValueError, dot, f, v, r) assert_raises(ValueError, dot, f, v, r.T) r = np.empty((1024, 64)) assert_raises(ValueError, dot, f, v, r[:, ::2]) assert_raises(ValueError, dot, f, v, r[:, :32]) r = np.empty((1024, 32), dtype=np.float32) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024, 32), dtype=int) assert_raises(ValueError, dot, f, v, r) def test_dot_array_order(self): a = np.array([[1, 2], [3, 4]], order='C') b = np.array([[1, 2], [3, 4]], order='F') res = np.dot(a, a) # integer arrays are exact assert_equal(np.dot(a, b), res) assert_equal(np.dot(b, a), res) assert_equal(np.dot(b, b), res) def test_dot_scalar_and_matrix_of_objects(self): # Ticket #2469 arr = np.matrix([1, 2], dtype=object) desired = np.matrix([[3, 6]], dtype=object) assert_equal(np.dot(arr, 3), desired) assert_equal(np.dot(3, arr), desired) def test_accelerate_framework_sgemv_fix(self): def aligned_array(shape, align, dtype, order='C'): d = dtype(0) N = np.prod(shape) tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) address = tmp.__array_interface__["data"][0] for offset in range(align): if (address + offset) % align == 0: break tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) return tmp.reshape(shape, order=order) def as_aligned(arr, align, dtype, order='C'): aligned = aligned_array(arr.shape, align, dtype, order) aligned[:] = arr[:] return aligned def assert_dot_close(A, X, desired): assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) m = aligned_array(100, 15, np.float32) s = aligned_array((100, 100), 15, np.float32) np.dot(s, m) # this will always segfault if the bug is present testdata = itertools.product((15,32), (10000,), (200,89), ('C','F')) for align, m, n, a_order in testdata: # Calculation in double precision A_d = np.random.rand(m, n) X_d = np.random.rand(n) desired = np.dot(A_d, X_d) # Calculation with aligned single 
precision A_f = as_aligned(A_d, align, np.float32, order=a_order) X_f = as_aligned(X_d, align, np.float32) assert_dot_close(A_f, X_f, desired) # Strided A rows A_d_2 = A_d[::2] desired = np.dot(A_d_2, X_d) A_f_2 = A_f[::2] assert_dot_close(A_f_2, X_f, desired) # Strided A columns, strided X vector A_d_22 = A_d_2[:, ::2] X_d_2 = X_d[::2] desired = np.dot(A_d_22, X_d_2) A_f_22 = A_f_2[:, ::2] X_f_2 = X_f[::2] assert_dot_close(A_f_22, X_f_2, desired) # Check the strides are as expected if a_order == 'F': assert_equal(A_f_22.strides, (8, 8 * m)) else: assert_equal(A_f_22.strides, (8 * n, 8)) assert_equal(X_f_2.strides, (8,)) # Strides in A rows + cols only X_f_2c = as_aligned(X_f_2, align, np.float32) assert_dot_close(A_f_22, X_f_2c, desired) # Strides just in A cols A_d_12 = A_d[:, ::2] desired = np.dot(A_d_12, X_d_2) A_f_12 = A_f[:, ::2] assert_dot_close(A_f_12, X_f_2c, desired) # Strides in A cols and X assert_dot_close(A_f_12, X_f_2, desired) class MatmulCommon(object): """Common tests for '@' operator and numpy.matmul. Do not derive from TestCase to avoid nose running it. """ # Should work with these types. Will want to add # "O" at some point types = "?bhilqBHILQefdgFDG" def test_exceptions(self): dims = [ ((1,), (2,)), # mismatched vector vector ((2, 1,), (2,)), # mismatched matrix vector ((2,), (1, 2)), # mismatched vector matrix ((1, 2), (3, 1)), # mismatched matrix matrix ((1,), ()), # vector scalar ((), (1)), # scalar vector ((1, 1), ()), # matrix scalar ((), (1, 1)), # scalar matrix ((2, 2, 1), (3, 1, 2)), # cannot broadcast ] for dt, (dm1, dm2) in itertools.product(self.types, dims): a = np.ones(dm1, dtype=dt) b = np.ones(dm2, dtype=dt) assert_raises(ValueError, self.matmul, a, b) def test_shapes(self): dims = [ ((1, 1), (2, 1, 1)), # broadcast first argument ((2, 1, 1), (1, 1)), # broadcast second argument ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match ] for dt, (dm1, dm2) in itertools.product(self.types, dims): a = np.ones(dm1, dtype=dt) b = np.ones(dm2, dtype=dt) res = self.matmul(a, b) assert_(res.shape == (2, 1, 1)) # vector vector returns scalars. 
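        # matmul of two 1-d operands contracts away both axes, leaving a 0-d result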
for dt in self.types: a = np.ones((2,), dtype=dt) b = np.ones((2,), dtype=dt) c = self.matmul(a, b) assert_(np.array(c).shape == ()) def test_result_types(self): mat = np.ones((1,1)) vec = np.ones((1,)) for dt in self.types: m = mat.astype(dt) v = vec.astype(dt) for arg in [(m, v), (v, m), (m, m)]: res = self.matmul(*arg) assert_(res.dtype == dt) # vector vector returns scalars res = self.matmul(v, v) assert_(type(res) is np.dtype(dt).type) def test_vector_vector_values(self): vec = np.array([1, 2]) tgt = 5 for dt in self.types[1:]: v1 = vec.astype(dt) res = self.matmul(v1, v1) assert_equal(res, tgt) # boolean type vec = np.array([True, True], dtype='?') res = self.matmul(vec, vec) assert_equal(res, True) def test_vector_matrix_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([7, 10]) tgt2 = np.stack([tgt1]*2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) m2 = mat2.astype(dt) res = self.matmul(v, m1) assert_equal(res, tgt1) res = self.matmul(v, m2) assert_equal(res, tgt2) # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([True, False]) tgt2 = np.stack([tgt1]*2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) res = self.matmul(vec, mat2) assert_equal(res, tgt2) def test_matrix_vector_values(self): vec = np.array([1, 2]) mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([5, 11]) tgt2 = np.stack([tgt1]*2, axis=0) for dt in self.types[1:]: v = vec.astype(dt) m1 = mat1.astype(dt) m2 = mat2.astype(dt) res = self.matmul(m1, v) assert_equal(res, tgt1) res = self.matmul(m2, v) assert_equal(res, tgt2) # boolean type vec = np.array([True, False]) mat1 = np.array([[True, False], [False, True]]) mat2 = np.stack([mat1]*2, axis=0) tgt1 = np.array([True, False]) tgt2 = np.stack([tgt1]*2, axis=0) res = self.matmul(vec, mat1) assert_equal(res, tgt1) res = self.matmul(vec, mat2) assert_equal(res, tgt2) def test_matrix_matrix_values(self): mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.array([[1, 0], [1, 1]]) mat12 = np.stack([mat1, mat2], axis=0) mat21 = np.stack([mat2, mat1], axis=0) tgt11 = np.array([[7, 10], [15, 22]]) tgt12 = np.array([[3, 2], [7, 4]]) tgt21 = np.array([[1, 2], [4, 6]]) tgt12_21 = np.stack([tgt12, tgt21], axis=0) tgt11_12 = np.stack((tgt11, tgt12), axis=0) tgt11_21 = np.stack((tgt11, tgt21), axis=0) for dt in self.types[1:]: m1 = mat1.astype(dt) m2 = mat2.astype(dt) m12 = mat12.astype(dt) m21 = mat21.astype(dt) # matrix @ matrix res = self.matmul(m1, m2) assert_equal(res, tgt12) res = self.matmul(m2, m1) assert_equal(res, tgt21) # stacked @ matrix res = self.matmul(m12, m1) assert_equal(res, tgt11_21) # matrix @ stacked res = self.matmul(m1, m12) assert_equal(res, tgt11_12) # stacked @ stacked res = self.matmul(m12, m21) assert_equal(res, tgt12_21) # boolean type m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_) m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_) m12 = np.stack([m1, m2], axis=0) m21 = np.stack([m2, m1], axis=0) tgt11 = m1 tgt12 = m1 tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_) tgt12_21 = np.stack([tgt12, tgt21], axis=0) tgt11_12 = np.stack((tgt11, tgt12), axis=0) tgt11_21 = np.stack((tgt11, tgt21), axis=0) # matrix @ matrix res = self.matmul(m1, m2) assert_equal(res, tgt12) res = self.matmul(m2, m1) assert_equal(res, tgt21) # stacked @ matrix res = self.matmul(m12, m1) assert_equal(res, tgt11_21) # matrix @ stacked res = self.matmul(m1, 
m12) assert_equal(res, tgt11_12) # stacked @ stacked res = self.matmul(m12, m21) assert_equal(res, tgt12_21) class TestMatmul(MatmulCommon): matmul = np.matmul def test_out_arg(self): a = np.ones((2, 2), dtype=float) b = np.ones((2, 2), dtype=float) tgt = np.full((2,2), 2, dtype=float) # test as positional argument msg = "out positional argument" out = np.zeros((2, 2), dtype=float) self.matmul(a, b, out) assert_array_equal(out, tgt, err_msg=msg) # test as keyword argument msg = "out keyword argument" out = np.zeros((2, 2), dtype=float) self.matmul(a, b, out=out) assert_array_equal(out, tgt, err_msg=msg) # test out with not allowed type cast (safe casting) # einsum and cblas raise different error types, so # use Exception. msg = "out argument with illegal cast" out = np.zeros((2, 2), dtype=np.int32) assert_raises(Exception, self.matmul, a, b, out=out) # skip following tests for now, cblas does not allow non-contiguous # outputs and consistency with dot would require same type, # dimensions, subtype, and c_contiguous. # test out with allowed type cast # msg = "out argument with allowed cast" # out = np.zeros((2, 2), dtype=np.complex128) # self.matmul(a, b, out=out) # assert_array_equal(out, tgt, err_msg=msg) # test out non-contiguous # msg = "out argument with non-contiguous layout" # c = np.zeros((2, 2, 2), dtype=float) # self.matmul(a, b, out=c[..., 0]) # assert_array_equal(c, tgt, err_msg=msg) if sys.version_info[:2] >= (3, 5): class TestMatmulOperator(MatmulCommon): import operator matmul = operator.matmul def test_array_priority_override(self): class A(object): __array_priority__ = 1000 def __matmul__(self, other): return "A" def __rmatmul__(self, other): return "A" a = A() b = np.ones(2) assert_equal(self.matmul(a, b), "A") assert_equal(self.matmul(b, a), "A") def test_matmul_inplace(): # It would be nice to support in-place matmul eventually, but for now # we don't have a working implementation, so better just to error out # and nudge people to writing "a = a @ b". a = np.eye(3) b = np.eye(3) assert_raises(TypeError, a.__imatmul__, b) import operator assert_raises(TypeError, operator.imatmul, a, b) # we avoid writing the token `exec` so as not to crash python 2's # parser exec_ = getattr(builtins, "exec") assert_raises(TypeError, exec_, "a @= b", globals(), locals()) class TestInner(object): def test_inner_type_mismatch(self): c = 1. A = np.array((1,1), dtype='i,i') assert_raises(TypeError, np.inner, c, A) assert_raises(TypeError, np.inner, A, c) def test_inner_scalar_and_vector(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': sca = np.array(3, dtype=dt)[()] vec = np.array([1, 2], dtype=dt) desired = np.array([3, 6], dtype=dt) assert_equal(np.inner(vec, sca), desired) assert_equal(np.inner(sca, vec), desired) def test_inner_scalar_and_matrix(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': sca = np.array(3, dtype=dt)[()] arr = np.matrix([[1, 2], [3, 4]], dtype=dt) desired = np.matrix([[3, 6], [9, 12]], dtype=dt) assert_equal(np.inner(arr, sca), desired) assert_equal(np.inner(sca, arr), desired) def test_inner_scalar_and_matrix_of_objects(self): # Ticket #4482 arr = np.matrix([1, 2], dtype=object) desired = np.matrix([[3, 6]], dtype=object) assert_equal(np.inner(arr, 3), desired) assert_equal(np.inner(3, arr), desired) def test_vecself(self): # Ticket 844. 
# Inner product of a vector with itself segfaults or give # meaningless result a = np.zeros(shape=(1, 80), dtype=np.float64) p = np.inner(a, a) assert_almost_equal(p, 0, decimal=14) def test_inner_product_with_various_contiguities(self): # github issue 6532 for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': # check an inner product involving a matrix transpose A = np.array([[1, 2], [3, 4]], dtype=dt) B = np.array([[1, 3], [2, 4]], dtype=dt) C = np.array([1, 1], dtype=dt) desired = np.array([4, 6], dtype=dt) assert_equal(np.inner(A.T, C), desired) assert_equal(np.inner(C, A.T), desired) assert_equal(np.inner(B, C), desired) assert_equal(np.inner(C, B), desired) # check a matrix product desired = np.array([[7, 10], [15, 22]], dtype=dt) assert_equal(np.inner(A, B), desired) # check the syrk vs. gemm paths desired = np.array([[5, 11], [11, 25]], dtype=dt) assert_equal(np.inner(A, A), desired) assert_equal(np.inner(A, A.copy()), desired) # check an inner product involving an aliased and reversed view a = np.arange(5).astype(dt) b = a[::-1] desired = np.array(10, dtype=dt).item() assert_equal(np.inner(b, a), desired) def test_3d_tensor(self): for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': a = np.arange(24).reshape(2,3,4).astype(dt) b = np.arange(24, 48).reshape(2,3,4).astype(dt) desired = np.array( [[[[ 158, 182, 206], [ 230, 254, 278]], [[ 566, 654, 742], [ 830, 918, 1006]], [[ 974, 1126, 1278], [1430, 1582, 1734]]], [[[1382, 1598, 1814], [2030, 2246, 2462]], [[1790, 2070, 2350], [2630, 2910, 3190]], [[2198, 2542, 2886], [3230, 3574, 3918]]]], dtype=dt ) assert_equal(np.inner(a, b), desired) assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) class TestSummarization(object): def test_1d(self): A = np.arange(1001) strA = '[ 0 1 2 ..., 998 999 1000]' assert_(str(A) == strA) reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' assert_(repr(A) == reprA) def test_2d(self): A = np.arange(1002).reshape(2, 501) strA = '[[ 0 1 2 ..., 498 499 500]\n' \ ' [ 501 502 503 ..., 999 1000 1001]]' assert_(str(A) == strA) reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ ' [ 501, 502, 503, ..., 999, 1000, 1001]])' assert_(repr(A) == reprA) class TestAlen(object): def test_basic(self): m = np.array([1, 2, 3]) assert_equal(np.alen(m), 3) m = np.array([[1, 2, 3], [4, 5, 7]]) assert_equal(np.alen(m), 2) m = [1, 2, 3] assert_equal(np.alen(m), 3) m = [[1, 2, 3], [4, 5, 7]] assert_equal(np.alen(m), 2) def test_singleton(self): assert_equal(np.alen(5), 1) class TestChoose(object): def setup(self): self.x = 2*np.ones((3,), dtype=int) self.y = 3*np.ones((3,), dtype=int) self.x2 = 2*np.ones((2, 3), dtype=int) self.y2 = 3*np.ones((2, 3), dtype=int) self.ind = [0, 0, 1] def test_basic(self): A = np.choose(self.ind, (self.x, self.y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): A = np.choose(self.ind, (self.x2, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): A = np.choose(self.ind, (self.x, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) class TestRepeat(object): def setup(self): self.m = np.array([1, 2, 3, 4, 5, 6]) self.m_rect = self.m.reshape((2, 3)) def test_basic(self): A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) assert_equal(A, [1, 2, 2, 2, 3, 3, 4, 5, 6, 6]) def test_broadcast1(self): A = np.repeat(self.m, 2) assert_equal(A, [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]) def test_axis_spec(self): A = np.repeat(self.m_rect, [2, 1], axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6]]) A = np.repeat(self.m_rect, [1, 3, 2], axis=1) 
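        # with axis=1 the per-column counts [1, 3, 2] apply within each row independently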
assert_equal(A, [[1, 2, 2, 2, 3, 3], [4, 5, 5, 5, 6, 6]]) def test_broadcast2(self): A = np.repeat(self.m_rect, 2, axis=0) assert_equal(A, [[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) A = np.repeat(self.m_rect, 2, axis=1) assert_equal(A, [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]) # TODO: test for multidimensional NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} class TestNeighborhoodIter(object): # Simple, 2d tests def _test_simple2d(self, dt): # Test zero and one padding for simple data type x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) assert_array_equal(l, r) def test_simple2d(self): self._test_simple2d(float) def test_simple2d_object(self): self._test_simple2d(Decimal) def _test_mirror2d(self, dt): x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) assert_array_equal(l, r) def test_mirror2d(self): self._test_mirror2d(float) def test_mirror2d_object(self): self._test_mirror2d(Decimal) # Simple, 1d tests def _test_simple(self, dt): # Test padding with constant values x = np.linspace(1, 5, 5).astype(dt) r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant']) assert_array_equal(l, r) def test_simple_float(self): self._test_simple(float) def test_simple_object(self): self._test_simple(Decimal) # Test mirror modes def _test_mirror(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror']) assert_([i.dtype == dt for i in l]) assert_array_equal(l, r) def test_mirror(self): self._test_mirror(float) def test_mirror_object(self): self._test_mirror(Decimal) # Circular mode def _test_circular(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular']) assert_array_equal(l, r) def test_circular(self): self._test_circular(float) def test_circular_object(self): self._test_circular(Decimal) # Test stacking 
neighborhood iterators class TestStackedNeighborhoodIter(object): # Simple, 1d test: stacking 2 constant-padded neigh iterators def test_simple_const(self): dt = np.float64 # Test zero and one padding for simple data type x = np.array([1, 2, 3], dtype=dt) r = [np.array([0], dtype=dt), np.array([0], dtype=dt), np.array([1], dtype=dt), np.array([2], dtype=dt), np.array([3], dtype=dt), np.array([0], dtype=dt), np.array([0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([1, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) assert_array_equal(l, r) # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # mirror padding def test_simple_mirror(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 1], dtype=dt), np.array([1, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 3], dtype=dt), np.array([3, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # circular padding def test_simple_circular(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 3, 1], dtype=dt), np.array([3, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 1], dtype=dt), np.array([3, 1, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], 
NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator # being strictly within the array def test_simple_strict_within(self): dt = np.float64 # Stacking zero on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) class TestWarnings(object): def test_complex_warning(self): x = np.array([1, 2]) y = np.array([1-2j, 1+2j]) with warnings.catch_warnings(): warnings.simplefilter("error", np.ComplexWarning) assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) assert_equal(x, [1, 2]) class TestMinScalarType(object): def test_usigned_shortshort(self): dt = np.min_scalar_type(2**8-1) wanted = np.dtype('uint8') assert_equal(wanted, dt) def test_usigned_short(self): dt = np.min_scalar_type(2**16-1) wanted = np.dtype('uint16') assert_equal(wanted, dt) def test_usigned_int(self): dt = np.min_scalar_type(2**32-1) wanted = np.dtype('uint32') assert_equal(wanted, dt) def test_usigned_longlong(self): dt = np.min_scalar_type(2**63-1) wanted = np.dtype('uint64') assert_equal(wanted, dt) def test_object(self): dt = np.min_scalar_type(2**64) wanted = np.dtype('O') assert_equal(wanted, dt) from numpy.core._internal import _dtype_from_pep3118 class TestPEP3118Dtype(object): def _check(self, spec, wanted): dt = np.dtype(wanted) actual = _dtype_from_pep3118(spec) assert_equal(actual, dt, err_msg="spec %r != dtype %r" % (spec, wanted)) def test_native_padding(self): align = np.dtype('i').alignment for j in range(8): if j == 0: s = 'bi' else: s = 'b%dxi' % j self._check('@'+s, {'f0': ('i1', 0), 'f1': ('i', align*(1 + j//align))}) self._check('='+s, {'f0': ('i1', 0), 'f1': ('i', 1+j)}) def test_native_padding_2(self): # Native padding should work also for structs and sub-arrays self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) def test_trailing_padding(self): # Trailing padding should be included, *and*, the item size # should match the alignment if in aligned mode align = np.dtype('i').alignment size = np.dtype('i').itemsize def aligned(n): return align*(1 + (n-1)//align) base = dict(formats=['i'], names=['f0']) self._check('ix', dict(itemsize=aligned(size + 1), **base)) self._check('ixx', dict(itemsize=aligned(size + 2), **base)) self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) self._check('ixxxx', 
dict(itemsize=aligned(size + 4), **base)) self._check('i7x', dict(itemsize=aligned(size + 7), **base)) self._check('^ix', dict(itemsize=size + 1, **base)) self._check('^ixx', dict(itemsize=size + 2, **base)) self._check('^ixxx', dict(itemsize=size + 3, **base)) self._check('^ixxxx', dict(itemsize=size + 4, **base)) self._check('^i7x', dict(itemsize=size + 7, **base)) def test_native_padding_3(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) def test_padding_with_array_inside_struct(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) def test_byteorder_inside_struct(self): # The byte order after @T{=i} should be '=', not '@'. # Check this by noting the absence of native alignment. self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), 'f1': ('i', 5)}) def test_intra_padding(self): # Natively aligned sub-arrays may require some internal padding align = np.dtype('i').alignment size = np.dtype('i').itemsize def aligned(n): return (align*(1 + (n-1)//align)) self._check('(3)T{ix}', (dict( names=['f0'], formats=['i'], offsets=[0], itemsize=aligned(size + 1) ), (3,))) def test_char_vs_string(self): dt = np.dtype('c') self._check('c', dt) dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) self._check('4c4s', dt) def test_field_order(self): # gh-9053 - previously, we relied on dictionary key order self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) def test_unnamed_fields(self): self._check('ii', [('f0', 'i'), ('f1', 'i')]) self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) self._check('i', 'i') self._check('i:f0:', [('f0', 'i')]) class TestNewBufferProtocol(object): def _check_roundtrip(self, obj): obj = np.asarray(obj) x = memoryview(obj) y = np.asarray(x) y2 = np.array(x) assert_(not y.flags.owndata) assert_(y2.flags.owndata) assert_equal(y.dtype, obj.dtype) assert_equal(y.shape, obj.shape) assert_array_equal(obj, y) assert_equal(y2.dtype, obj.dtype) assert_equal(y2.shape, obj.shape) assert_array_equal(obj, y2) def test_roundtrip(self): x = np.array([1, 2, 3, 4, 5], dtype='i4') self._check_roundtrip(x) x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] self._check_roundtrip(x) dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'aaaa', 'bbbb', b'xxx', True, 1.0)], dtype=dt) self._check_roundtrip(x) x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i4') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i4') self._check_roundtrip(x) # check long long can be represented as non-native x = np.array([1, 2, 3], dtype='>q') self._check_roundtrip(x) # Native-only data types can be 
passed through the buffer interface # only in native byte order if sys.byteorder == 'little': x = np.array([1, 2, 3], dtype='>g') assert_raises(ValueError, self._check_roundtrip, x) x = np.array([1, 2, 3], dtype='<g') self._check_roundtrip(x) else: x = np.array([1, 2, 3], dtype='>g') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<g') assert_raises(ValueError, self._check_roundtrip, x) def test_roundtrip_half(self): half_list = [ 1.0, -2.0, 6.5504 * 10**4, # (max half precision) 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal) 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal) 0.0, -0.0, float('+inf'), float('-inf'), 0.333251953125, # ~= 1/3 ] x = np.array(half_list, dtype='>e') self._check_roundtrip(x) x = np.array(half_list, dtype='<e') self._check_roundtrip(x) def test_roundtrip_single_types(self): for typ in np.typeDict.values(): dtype = np.dtype(typ) if dtype.char in 'Mm': # datetimes cannot be used in buffers continue if dtype.char == 'V': # skip void continue x = np.zeros(4, dtype=dtype) self._check_roundtrip(x) if dtype.char not in 'qQgG': dt = dtype.newbyteorder('<') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) dt = dtype.newbyteorder('>') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) def test_roundtrip_scalar(self): # Issue #4015. self._check_roundtrip(0) def test_export_simple_1d(self): x = np.array([1, 2, 3, 4, 5], dtype='i') y = memoryview(x) assert_equal(y.format, 'i') assert_equal(y.shape, (5,)) assert_equal(y.ndim, 1) assert_equal(y.strides, (4,)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_simple_nd(self): x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = memoryview(x) assert_equal(y.format, 'd') assert_equal(y.shape, (2, 2)) assert_equal(y.ndim, 2) assert_equal(y.strides, (16, 8)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 8) def test_export_discontiguous(self): x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) assert_equal(y.ndim, 2) assert_equal(y.strides, (36, 4)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_record(self): dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, b'aaaa', 'bbbb', b' ', True, 1.0)], dtype=dt) y = memoryview(x) assert_equal(y.shape, (1,)) assert_equal(y.ndim, 1) assert_equal(y.suboffsets, EMPTY) sz = sum([np.dtype(b).itemsize for a, b in dt]) if np.dtype('l').itemsize == 4: assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') else: assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): assert_equal(y.strides, (sz,)) assert_equal(y.itemsize, sz) def test_export_subarray(self): x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) y = memoryview(x) assert_equal(y.format, 'T{(2,2)i:a:}') assert_equal(y.shape, EMPTY) assert_equal(y.ndim, 0) assert_equal(y.strides, EMPTY) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 16) def 
test_export_endian(self): x = np.array([1, 2, 3], dtype='>i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, '>i') else: assert_equal(y.format, 'i') x = np.array([1, 2, 3], dtype='<i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, 'i') else: assert_equal(y.format, '<i') def test_export_flags(self): # Check SIMPLE flag, see also gh-3613 (exception should be BufferError) assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) def test_padding(self): for j in range(8): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) def test_reference_leak(self): if HAS_REFCOUNT: count_1 = sys.getrefcount(np.core._internal) a = np.zeros(4) b = memoryview(a) c = np.asarray(b) if HAS_REFCOUNT: count_2 = sys.getrefcount(np.core._internal) assert_equal(count_1, count_2) del c # avoid pyflakes unused variable warning. def test_padded_struct_array(self): dt1 = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1) self._check_roundtrip(x1) dt2 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2) self._check_roundtrip(x2) dt3 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3) self._check_roundtrip(x3) def test_relaxed_strides(self): # Test that relaxed strides are converted to non-relaxed c = np.ones((1, 10, 10), dtype='i8') # Check for NPY_RELAXED_STRIDES_CHECKING: if np.ones((10, 1), order="C").flags.f_contiguous: c.strides = (-1, 80, 8) assert_(memoryview(c).strides == (800, 80, 8)) # Writing C-contiguous data to a BytesIO buffer should work fd = io.BytesIO() fd.write(c.data) fortran = c.T assert_(memoryview(fortran).strides == (8, 80, 800)) arr = np.ones((1, 10)) if arr.flags.f_contiguous: shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS']) assert_(strides[0] == 8) arr = np.ones((10, 1), order='F') shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) assert_(strides[-1] == 8) class TestArrayAttributeDeletion(object): def test_multiarray_writable_attributes_deletion(self): # ticket #2046, should not seqfault, raise AttributeError a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] with suppress_warnings() as sup: sup.filter(DeprecationWarning, "Assigning the 'data' attribute") for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_not_writable_attributes_deletion(self): a = np.ones(2) attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", "ctypes", "T", "__array_interface__", "__array_struct__", "__array_priority__", "__array_finalize__"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_writable_attribute_deletion(self): a = np.ones(2).flags attr = ['updateifcopy', 'aligned', 'writeable'] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_not_writable_attribute_deletion(self): a = np.ones(2).flags attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", "owndata", "fnc", "forc", "behaved", "carray", "farray", "num"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_array_interface(): # Test scalar coercion within the array interface class Foo(object): def __init__(self, value): self.value = value self.iface = {'typestr': '=f8'} def __float__(self): return float(self.value) 
@property def __array_interface__(self): return self.iface f = Foo(0.5) assert_equal(np.array(f), 0.5) assert_equal(np.array([f]), [0.5]) assert_equal(np.array([f, f]), [0.5, 0.5]) assert_equal(np.array(f).dtype, np.dtype('=f8')) # Test various shape definitions f.iface['shape'] = () assert_equal(np.array(f), 0.5) f.iface['shape'] = None assert_raises(TypeError, np.array, f) f.iface['shape'] = (1, 1) assert_equal(np.array(f), [[0.5]]) f.iface['shape'] = (2,) assert_raises(ValueError, np.array, f) # test scalar with no shape class ArrayLike(object): array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) def test_array_interface_itemsize(): # See gh-6361 my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'], 'offsets': [0, 8], 'itemsize': 16}) a = np.ones(10, dtype=my_dtype) descr_t = np.dtype(a.__array_interface__['descr']) typestr_t = np.dtype(a.__array_interface__['typestr']) assert_equal(descr_t.itemsize, typestr_t.itemsize) def test_flat_element_deletion(): it = np.ones(3).flat try: del it[1] del it[1:2] except TypeError: pass except Exception: raise AssertionError def test_scalar_element_deletion(): a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) assert_raises(ValueError, a[0].__delitem__, 'x') class TestMemEventHook(object): def test_mem_seteventhook(self): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src test_pydatamem_seteventhook_start() # force an allocation and free of a numpy array # needs to be larger then limit of small memory cacher in ctors.c a = np.zeros(1000) del a gc.collect() test_pydatamem_seteventhook_end() class TestMapIter(object): def test_mapiter(self): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src a = np.arange(12).reshape((3, 4)).astype(float) index = ([1, 1, 2, 0], [0, 0, 2, 3]) vals = [50, 50, 30, 16] test_inplace_increment(a, index, vals) assert_equal(a, [[0.00, 1., 2.0, 19.], [104., 5., 6.0, 7.0], [8.00, 9., 40., 11.]]) b = np.arange(6).astype(float) index = (np.array([1, 2, 0]),) vals = [50, 4, 100.1] test_inplace_increment(b, index, vals) assert_equal(b, [100.1, 51., 6., 3., 4., 5.]) class TestAsCArray(object): def test_1darray(self): array = np.arange(24, dtype=np.double) from_c = test_as_c_array(array, 3) assert_equal(array[3], from_c) def test_2darray(self): array = np.arange(24, dtype=np.double).reshape(3, 8) from_c = test_as_c_array(array, 2, 4) assert_equal(array[2, 4], from_c) def test_3darray(self): array = np.arange(24, dtype=np.double).reshape(2, 3, 4) from_c = test_as_c_array(array, 1, 2, 3) assert_equal(array[1, 2, 3], from_c) class TestConversion(object): def test_array_scalar_relational_operation(self): # All integer for dt1 in np.typecodes['AllInteger']: assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in np.typecodes['AllInteger']: assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) # Unsigned integers for dt1 in 'BHILQP': assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,)) # Unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, 
dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,)) for dt2 in 'bhlqp' + np.typecodes['Float']: assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), "type %s and %s failed" % (dt1, dt2)) def test_to_bool_scalar(self): assert_equal(bool(np.array([False])), False) assert_equal(bool(np.array([True])), True) assert_equal(bool(np.array([[42]])), True) assert_raises(ValueError, bool, np.array([1, 2])) class NotConvertible(object): def __bool__(self): raise NotImplementedError __nonzero__ = __bool__ # python 2 assert_raises(NotImplementedError, bool, np.array(NotConvertible())) assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) self_containing = np.array([None]) self_containing[0] = self_containing try: Error = RecursionError except NameError: Error = RuntimeError # python < 3.5 assert_raises(Error, bool, self_containing) # previously stack overflow class TestWhere(object): def test_basic(self): dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, np.longdouble, np.clongdouble] for dt in dts: c = np.ones(53, dtype=bool) assert_equal(np.where( c, dt(0), dt(1)), dt(0)) assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) assert_equal(np.where(True, dt(0), dt(1)), dt(0)) assert_equal(np.where(False, dt(0), dt(1)), dt(1)) d = np.ones_like(c).astype(dt) e = np.zeros_like(d) r = d.astype(dt) c[7] = False r[7] = e[7] assert_equal(np.where(c, e, e), e) assert_equal(np.where(c, d, e), r) assert_equal(np.where(c, d, e[0]), r) assert_equal(np.where(c, d[0], e), r) assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) def test_exotic(self): # object assert_array_equal(np.where(True, None, None), np.array(None)) # zero sized m = np.array([], dtype=bool).reshape(0, 3) b = np.array([], dtype=np.float64).reshape(0, 3) assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) # object cast d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, 1.267, 0.229, -1.39, 0.487]) nan = float('NaN') e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], dtype=object) m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) r = e[:] r[np.where(m)] = d[np.where(m)] assert_array_equal(np.where(m, d, e), r) r = e[:] r[np.where(~m)] = d[np.where(~m)] assert_array_equal(np.where(m, e, d), r) assert_array_equal(np.where(m, e, e), e) # minimal dtype result with NaN scalar (e.g required by pandas) d = np.array([1., 2.], dtype=np.float32) e = float('NaN') assert_equal(np.where(True, d, 
e).dtype, np.float32) e = float('Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) e = float('-Infinity') assert_equal(np.where(True, d, e).dtype, np.float32) # also check upcast e = float(1e150) assert_equal(np.where(True, d, e).dtype, np.float64) def test_ndim(self): c = [True, False] a = np.zeros((2, 25)) b = np.ones((2, 25)) r = np.where(np.array(c)[:,np.newaxis], a, b) assert_array_equal(r[0], a[0]) assert_array_equal(r[1], b[0]) a = a.T b = b.T r = np.where(c, a, b) assert_array_equal(r[:,0], a[:,0]) assert_array_equal(r[:,1], b[:,0]) def test_dtype_mix(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) a = np.uint32(1) b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) a = a.astype(np.float32) b = b.astype(np.int64) assert_equal(np.where(c, a, b), r) # non bool mask c = c.astype(int) c[c != 0] = 34242324 assert_equal(np.where(c, a, b), r) # invert tmpmask = c != 0 c[c == 0] = 41247212 c[tmpmask] = 0 assert_equal(np.where(c, b, a), r) def test_foreign(self): c = np.array([False, True, False, False, False, False, True, False, False, False, True, False]) r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], dtype=np.float64) a = np.ones(1, dtype='>i4') b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], dtype=np.float64) assert_equal(np.where(c, a, b), r) b = b.astype('>f8') assert_equal(np.where(c, a, b), r) a = a.astype('<i4') assert_equal(np.where(c, a, b), r) c = c.astype('>i4') assert_equal(np.where(c, a, b), r) def test_error(self): c = [True, True] a = np.ones((4, 5)) b = np.ones((5, 5)) assert_raises(ValueError, np.where, c, a, a) assert_raises(ValueError, np.where, c[0], a, b) def test_string(self): # gh-4778 check strings are properly filled with nulls a = np.array("abc") b = np.array("x" * 753) assert_equal(np.where(True, a, b), "abc") assert_equal(np.where(False, b, a), "abc") # check native datatype sized strings a = np.array("abcd") b = np.array("x" * 8) assert_equal(np.where(True, a, b), "abcd") assert_equal(np.where(False, b, a), "abcd") def test_empty_result(self): # pass empty where result through an assignment which reads the data of # empty arrays, error detectable with valgrind, see gh-8922 x = np.zeros((1, 1)) ibad = np.vstack(np.where(x == 99.)) assert_array_equal(ibad, np.atleast_2d(np.array([[],[]], dtype=np.intp))) def test_largedim(self): # invalid read regression gh-9304 shape = [10, 2, 3, 4, 5, 6] np.random.seed(2) array = np.random.rand(*shape) for i in range(10): benchmark = array.nonzero() result = array.nonzero() assert_array_equal(benchmark, result) if not IS_PYPY: # sys.getsizeof() is not valid on PyPy class TestSizeOf(object): def test_empty_array(self): x = np.array([]) assert_(sys.getsizeof(x) > 0) def check_array(self, dtype): elem_size = dtype(0).itemsize for length in [10, 50, 100, 500]: x = np.arange(length, dtype=dtype) assert_(sys.getsizeof(x) > length * elem_size) def test_array_int32(self): self.check_array(np.int32) def test_array_int64(self): self.check_array(np.int64) def test_array_float32(self): self.check_array(np.float32) def test_array_float64(self): self.check_array(np.float64) def test_view(self): d = np.ones(100) assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) def test_reshape(self): d = np.ones(100) assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) def 
test_resize(self): d = np.ones(100) old = sys.getsizeof(d) d.resize(50) assert_(old > sys.getsizeof(d)) d.resize(150) assert_(old < sys.getsizeof(d)) def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") class TestHashing(object): def test_arrays_not_hashable(self): x = np.ones(3) assert_raises(TypeError, hash, x) def test_collections_hashable(self): x = np.array([]) assert_(not isinstance(x, collections.Hashable)) class TestArrayPriority(object): # This will go away when __array_priority__ is settled, meanwhile # it serves to check unintended changes. op = operator binary_ops = [ op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, op.ge, op.lt, op.le, op.ne, op.eq ] # See #7949. Dont use "/" operator With -3 switch, since python reports it # as a DeprecationWarning if sys.version_info[0] < 3 and not sys.py3kwarning: binary_ops.append(op.div) class Foo(np.ndarray): __array_priority__ = 100. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Bar(np.ndarray): __array_priority__ = 101. def __new__(cls, *args, **kwargs): return np.array(*args, **kwargs).view(cls) class Other(object): __array_priority__ = 1000. def _all(self, other): return self.__class__() __add__ = __radd__ = _all __sub__ = __rsub__ = _all __mul__ = __rmul__ = _all __pow__ = __rpow__ = _all __div__ = __rdiv__ = _all __mod__ = __rmod__ = _all __truediv__ = __rtruediv__ = _all __floordiv__ = __rfloordiv__ = _all __and__ = __rand__ = _all __xor__ = __rxor__ = _all __or__ = __ror__ = _all __lshift__ = __rlshift__ = _all __rshift__ = __rrshift__ = _all __eq__ = _all __ne__ = _all __gt__ = _all __ge__ = _all __lt__ = _all __le__ = _all def test_ndarray_subclass(self): a = np.array([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_ndarray_other(self): a = np.array([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) def test_subclass_subclass(self): a = self.Foo([1, 2]) b = self.Bar([1, 2]) for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Bar), msg) assert_(isinstance(f(b, a), self.Bar), msg) def test_subclass_other(self): a = self.Foo([1, 2]) b = self.Other() for f in self.binary_ops: msg = repr(f) assert_(isinstance(f(a, b), self.Other), msg) assert_(isinstance(f(b, a), self.Other), msg) class TestBytestringArrayNonzero(object): def test_empty_bstring_array_is_falsey(self): assert_(not np.array([''], dtype=str)) def test_whitespace_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=str) a[0] = ' \0\0' assert_(not a) def test_all_null_bstring_array_is_falsey(self): a = np.array(['spam'], dtype=str) a[0] = '\0\0\0\0' assert_(not a) def test_null_inside_bstring_array_is_truthy(self): a = np.array(['spam'], dtype=str) a[0] = ' \0 \0' assert_(a) class TestUnicodeArrayNonzero(object): def test_empty_ustring_array_is_falsey(self): assert_(not np.array([''], dtype=np.unicode)) def test_whitespace_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0\0' assert_(not a) def test_all_null_ustring_array_is_falsey(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = '\0\0\0\0' assert_(not a) def test_null_inside_ustring_array_is_truthy(self): a = np.array(['eggs'], dtype=np.unicode) a[0] = ' \0 \0' assert_(a) class 
TestCTypes(object): def test_ctypes_is_available(self): test_arr = np.array([[1, 2, 3], [4, 5, 6]]) assert_equal(ctypes, test_arr.ctypes._ctypes) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) def test_ctypes_is_not_available(self): from numpy.core import _internal _internal.ctypes = None try: test_arr = np.array([[1, 2, 3], [4, 5, 6]]) assert_(isinstance(test_arr.ctypes._ctypes, _internal._missing_ctypes)) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) finally: _internal.ctypes = ctypes def test_orderconverter_with_nonASCII_unicode_ordering(): # gh-7475 a = np.arange(5) assert_raises(ValueError, a.flatten, order=u'\xe2') def test_equal_override(): # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which # did not respect overrides with __array_priority__ or __array_ufunc__. # The PR fixed this for __array_priority__ and __array_ufunc__ = None. class MyAlwaysEqual(object): def __eq__(self, other): return "eq" def __ne__(self, other): return "ne" class MyAlwaysEqualOld(MyAlwaysEqual): __array_priority__ = 10000 class MyAlwaysEqualNew(MyAlwaysEqual): __array_ufunc__ = None array = np.array([(0, 1), (2, 3)], dtype='i4,i4') for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew: my_always_equal = my_always_equal_cls() assert_equal(my_always_equal == array, 'eq') assert_equal(array == my_always_equal, 'eq') assert_equal(my_always_equal != array, 'ne') assert_equal(array != my_always_equal, 'ne') def test_npymath_complex(): # Smoketest npymath functions from numpy.core.multiarray_tests import ( npy_cabs, npy_carg) funcs = {npy_cabs: np.absolute, npy_carg: np.angle} vals = (1, np.inf, -np.inf, np.nan) types = (np.complex64, np.complex128, np.clongdouble) for fun, npfun in funcs.items(): for x, y in itertools.product(vals, vals): for t in types: z = t(complex(x, y)) got = fun(z) expected = npfun(z) assert_allclose(got, expected) def test_npymath_real(): # Smoketest npymath functions from numpy.core.multiarray_tests import ( npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) funcs = {npy_log10: np.log10, npy_cosh: np.cosh, npy_sinh: np.sinh, npy_tan: np.tan, npy_tanh: np.tanh} vals = (1, np.inf, -np.inf, np.nan) types = (np.float32, np.float64, np.longdouble) with np.errstate(all='ignore'): for fun, npfun in funcs.items(): for x, t in itertools.product(vals, types): z = t(x) got = fun(z) expected = npfun(z) assert_allclose(got, expected) if __name__ == "__main__": run_module_suite()
bsd-3-clause
3,085,100,826,705,004,500
36.366581
588
0.50084
false
bilke/OpenSG-1.8
SConsLocal/scons-local-0.96.1/SCons/Platform/irix.py
2
1555
"""SCons.Platform.irix

Platform-specific initialization for SGI IRIX systems.

There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""

#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Platform/irix.py 0.96.1.D001 2004/08/23 09:55:29 knight"

import posix

def generate(env):
    posix.generate(env)
lgpl-2.1
-25,476,674,456,146,590
39.921053
125
0.771704
false
nash-x/hws
neutron/plugins/l2_proxy/agent/clients.py
1
9765
# Copyright 2014, Huawei, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from oslo.config import cfg from neutron import context as n_context from neutron.openstack.common import importutils from neutron.openstack.common import log as logging from neutron.openstack.common import excutils logger = logging.getLogger(__name__) from neutron.plugins.l2_proxy.agent import neutron_proxy_context from neutron.plugins.l2_proxy.agent import neutron_keystoneclient as hkc from novaclient import client as novaclient from novaclient import shell as novashell from neutronclient.common import exceptions try: from swiftclient import client as swiftclient except ImportError: swiftclient = None logger.info('swiftclient not available') try: from neutronclient.v2_0 import client as neutronclient except ImportError: neutronclient = None logger.info('neutronclient not available') try: from cinderclient import client as cinderclient except ImportError: cinderclient = None logger.info('cinderclient not available') try: from ceilometerclient.v2 import client as ceilometerclient except ImportError: ceilometerclient = None logger.info('ceilometerclient not available') cloud_opts = [ cfg.StrOpt('cloud_backend', default=None, help="Cloud module to use as a backend. Defaults to OpenStack.") ] cfg.CONF.register_opts(cloud_opts) CASCADING = 'cascading' CASCADED = 'cascaded' class OpenStackClients(object): ''' Convenience class to create and cache client instances. 
''' def __init__(self, context): self.context = context self._nova = {} self._keystone = None self._swift = None self._neutron = None self._cinder = None self._ceilometer = None @property def auth_token(self): # if there is no auth token in the context # attempt to get one using the context username and password return self.context.auth_token or self.keystone().auth_token def keystone(self): if self._keystone: return self._keystone self._keystone = hkc.KeystoneClient(self.context) return self._keystone def url_for(self, **kwargs): return self.keystone().url_for(**kwargs) def nova(self, service_type='compute'): if service_type in self._nova: return self._nova[service_type] con = self.context if self.auth_token is None: logger.error("Nova connection failed, no auth_token!") return None computeshell = novashell.OpenStackComputeShell() extensions = computeshell._discover_extensions("1.1") args = { 'project_id': con.tenant_id, 'auth_url': con.auth_url, 'service_type': service_type, 'username': None, 'api_key': None, 'extensions': extensions } client = novaclient.Client(1.1, **args) management_url = self.url_for( service_type=service_type, attr='region', filter_value='RegionTwo') client.client.auth_token = self.auth_token client.client.management_url = management_url self._nova[service_type] = client return client def swift(self): if swiftclient is None: return None if self._swift: return self._swift con = self.context if self.auth_token is None: logger.error("Swift connection failed, no auth_token!") return None args = { 'auth_version': '2.0', 'tenant_name': con.tenant_id, 'user': con.username, 'key': None, 'authurl': None, 'preauthtoken': self.auth_token, 'preauthurl': self.url_for(service_type='object-store') } self._swift = swiftclient.Connection(**args) return self._swift def neutron(self): if neutronclient is None: return None if self._neutron: return self._neutron con = self.context if self.auth_token is None: logger.error("Neutron connection failed, no auth_token!") return None if self.context.region_name is None: management_url = self.url_for(service_type='network', endpoint_type='publicURL') else: management_url = self.url_for( service_type='network', attr='region', endpoint_type='publicURL', filter_value=self.context.region_name) args = { 'auth_url': con.auth_url, 'insecure': self.context.insecure, 'service_type': 'network', 'token': self.auth_token, 'endpoint_url': management_url } self._neutron = neutronclient.Client(**args) return self._neutron def cinder(self): if cinderclient is None: return self.nova('volume') if self._cinder: return self._cinder con = self.context if self.auth_token is None: logger.error("Cinder connection failed, no auth_token!") return None args = { 'service_type': 'volume', 'auth_url': con.auth_url, 'project_id': con.tenant_id, 'username': None, 'api_key': None } self._cinder = cinderclient.Client('1', **args) management_url = self.url_for(service_type='volume') self._cinder.client.auth_token = self.auth_token self._cinder.client.management_url = management_url return self._cinder def ceilometer(self): if ceilometerclient is None: return None if self._ceilometer: return self._ceilometer if self.auth_token is None: logger.error("Ceilometer connection failed, no auth_token!") return None con = self.context args = { 'auth_url': con.auth_url, 'service_type': 'metering', 'project_id': con.tenant_id, 'token': lambda: self.auth_token, 'endpoint': self.url_for(service_type='metering'), } client = ceilometerclient.Client(**args) self._ceilometer = client return 
self._ceilometer if cfg.CONF.cloud_backend: cloud_backend_module = importutils.import_module(cfg.CONF.cloud_backend) Clients = cloud_backend_module.Clients else: Clients = OpenStackClients logger.debug('Using backend %s' % Clients) def get_cascade_neutron_client(mode): if mode == CASCADING: region_name = cfg.CONF.AGENT.region_name elif mode == CASCADED: region_name = cfg.CONF.AGENT.neutron_region_name else: logger.error(_('Must be input mode(cascading or cascaded).')) raise context = n_context.get_admin_context_without_session() neutron_admin_auth_url = cfg.CONF.AGENT.neutron_admin_auth_url kwargs = {'auth_token': None, 'username': cfg.CONF.AGENT.neutron_admin_user, 'password': cfg.CONF.AGENT.admin_password, 'aws_creds': None, 'tenant': cfg.CONF.AGENT.neutron_admin_tenant_name, 'auth_url': neutron_admin_auth_url, 'insecure': cfg.CONF.AGENT.auth_insecure, 'roles': context.roles, 'is_admin': context.is_admin, 'region_name': region_name} reqCon = neutron_proxy_context.RequestContext(**kwargs) openStackClients = OpenStackClients(reqCon) neutronClient = openStackClients.neutron() return neutronClient def check_neutron_client_valid(function): @functools.wraps(function) def decorated_function(self, method_name, *args, **kwargs): retry = 0 while(True): try: return function(self, method_name, *args, **kwargs) except exceptions.Unauthorized: retry = retry + 1 if(retry <= 3): self.client = get_cascade_neutron_client(self.mode) continue else: with excutils.save_and_reraise_exception(): logger.error(_('Try 3 times, Unauthorized.')) return None return decorated_function class CascadeNeutronClient(object): def __init__(self, mode): #mode is cascading or cascaded self.mode = mode self.client = get_cascade_neutron_client(self.mode) @check_neutron_client_valid def __call__(self, method_name, *args, **kwargs): method = getattr(self.client, method_name) if method: return method(*args, **kwargs) else: raise Exception('can not find the method') @check_neutron_client_valid def execute(self, method_name, *args, **kwargs): method = getattr(self.client, method_name) if method: return method(*args, **kwargs) else: raise Exception('can not find the method')
apache-2.0
659,710,628,943,577,700
30.704545
79
0.603277
false
sibskull/synaptiks
synaptiks/monitors/mouses.py
1
8959
# -*- coding: utf-8 -*- # Copyright (c) 2011, Sebastian Wiesner <[email protected]> # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ synaptiks.monitors.mouses ========================= Implementation of mouse monitoring. .. moduleauthor:: Sebastian Wiesner <[email protected]> """ from __future__ import (print_function, division, unicode_literals, absolute_import) from collections import namedtuple from itertools import ifilter import pyudev from pyudev.pyqt4 import QUDevMonitorObserver from PyQt4.QtCore import QObject, pyqtSignal from synaptiks.monitors.power import create_resume_monitor __all__ = ['MouseDevicesManager', 'MouseDevicesMonitor', 'MouseDevice'] def _is_mouse(device): return (device.sys_name.startswith('event') and device.get('ID_INPUT_MOUSE') == '1' and not device.get('ID_INPUT_TOUCHPAD') == '1') class MouseDevice(namedtuple('_MouseDevice', ['serial', 'name'])): """ A :func:`~collections.namedtuple()` representing a mouse device. A mouse device currently has two attributes, the order corresponds to the tuple index: - :attr:`serial` - :attr:`name` """ @classmethod def from_udev(cls, device): """ Create a :class:`MouseDevice` tuple from a :class:`pyudev.Device`. """ # The name is available from the parent device of the actual event # device. The parent represents the actual physical device. The name # may be decorated with quotation marks, which are removed for the sake # of a clean represenation return cls(device['ID_SERIAL'], device.parent['NAME'].strip('"')) class MouseDevicesMonitor(QObject): """ Watch for plugged or unplugged mouse devices. """ #: Qt signal, which is emitted, when a mouse is plugged. The slot gets a #: single argument of :class:`MouseDevice`, which represents the plugged #: mouse device mousePlugged = pyqtSignal(MouseDevice) #: Qt signal, which is emitted, when a mouse is unplugged. The slot gets a #: single argument of type :class:`MouseDevice`, which represents the #: unplugged mouse device mouseUnplugged = pyqtSignal(MouseDevice) def __init__(self, parent=None): """ Create a new monitor. ``parent`` is the parent :class:`~PyQt4.QtCore.QObject`. 
""" QObject.__init__(self, parent) self._udev = pyudev.Context() self._notifier = QUDevMonitorObserver( pyudev.Monitor.from_netlink(self._udev), self) self._notifier.deviceEvent.connect(self._handle_udev_event) self._notifier.monitor.filter_by('input') self._notifier.monitor.start() self._event_signal_map = dict( add=self.mousePlugged, remove=self.mouseUnplugged) @property def plugged_devices(self): """ An iterator over all currently plugged mouse devices as :class:`MouseDevice` objects. """ devices = self._udev.list_devices( subsystem='input', ID_INPUT_MOUSE=True) for device in ifilter(_is_mouse, devices): yield MouseDevice.from_udev(device) def _handle_udev_event(self, evt, device): signal = self._event_signal_map.get(unicode(evt)) if signal and _is_mouse(device): signal.emit(MouseDevice.from_udev(device)) class MouseDevicesManager(MouseDevicesMonitor): """ Manage mouse devices. This class derives from :class:`MouseDevicesMonitor` to provide more advanced monitoring of mouse devices. In addition to the basic monitoring provided by :class:`MouseDevicesMonitor` this class keeps a record of currently plugged devices, and thus also informs about the *first* mouse plugged, and the *last* mouse unplugged. """ #: Qt signal, which is emitted if the first mouse is plugged. The slot : #: gets a single argument, which is the plugged :class:`MouseDevice`. firstMousePlugged = pyqtSignal(MouseDevice) #: Qt signal, which is emitted if the last mouse is unplugged. The slot : #: gets a single argument, which is the plugged :class:`MouseDevice`. lastMouseUnplugged = pyqtSignal(MouseDevice) def __init__(self, parent=None): """ Create a new manager. ``parent`` is the parent ``QObject``. """ MouseDevicesMonitor.__init__(self, parent) self._resume_monitor = create_resume_monitor(self) self._mouse_registry = set() self._ignored_mouses = frozenset() self.is_running = False def start(self): """ Start to observe mouse devices. Does nothing, if the manager is already running. """ if not self.is_running: self.mousePlugged.connect(self._register_mouse) self.mouseUnplugged.connect(self._unregister_mouse) if self._resume_monitor: self._resume_monitor.resuming.connect(self._reset_registry) self._reset_registry() self.is_running = True def stop(self): """ Stop to observe mouse devices. Does nothing, if the manager is not running. """ if self.is_running: self.mousePlugged.disconnect(self._register_mouse) self.mouseUnplugged.disconnect(self._unregister_mouse) if self._resume_monitor: self._resume_monitor.resuming.disconnect(self._reset_registry) self._clear_registry() self.is_running = False def _unregister_mouse(self, device): """ Unregister the given mouse ``device``. If this is the last plugged mouse, :attr:`lastMouseUnplugged` is emitted with the given ``device``. """ try: self._mouse_registry.remove(device) except KeyError: pass else: if not self._mouse_registry: self.lastMouseUnplugged.emit(device) def _register_mouse(self, device): """ Register the given mouse ``device``. If this is the first plugged mouse, :attr:`firstMousePlugged` is emitted with the given ``device``. """ if device.serial not in self._ignored_mouses: if not self._mouse_registry: self.firstMousePlugged.emit(device) self._mouse_registry.add(device) def _reset_registry(self): """ Re-register all plugged mouses. """ self._clear_registry() for device in self.plugged_devices: self._register_mouse(device) def _clear_registry(self): """ Clear the registry of plugged mouse devices. 
""" for device in list(self._mouse_registry): self._unregister_mouse(device) @property def ignored_mouses(self): """ The list of ignored mouses. This property holds a list of serial numbers. Mouse devices with these serial numbers are simply ignored when plugged or unplugged. Modifying the returned list in place does not have any effect, assign to this property to change the list of ignored devices. You may also assign a list of :class:`~synaptiks.monitors.MouseDevice` objects. """ return list(self._ignored_mouses) @ignored_mouses.setter def ignored_mouses(self, devices): devices = set(d if isinstance(d, basestring) else d.serial for d in devices) if self._ignored_mouses != devices: self._ignored_mouses = devices if self.is_running: self._reset_registry()
bsd-2-clause
-3,596,675,670,361,718,300
35.717213
79
0.650854
false
snoopycrimecop/openmicroscopy
components/tools/OmeroPy/test/integration/gatewaytest/test_get_objects.py
1
45419
#!/usr/bin/env python # -*- coding: utf-8 -*- """ gateway tests - Testing the gateway.getObject() and deleteObjects() methods Copyright 2013-2015 Glencoe Software, Inc. All rights reserved. Use is subject to license terms supplied in LICENSE.txt pytest fixtures used as defined in conftest.py: - gatewaywrapper - author_testimg_generated - author_testimg_tiny """ from builtins import str from builtins import range from builtins import object import omero import uuid import pytest from omero.gateway.scripts import dbhelpers from omero.rtypes import wrap, rlong from omero.testlib import ITest from omero.gateway import BlitzGateway, KNOWN_WRAPPERS, DatasetWrapper, \ ProjectWrapper, ImageWrapper, ScreenWrapper, PlateWrapper from omero.model import DatasetI, \ ImageI, \ PlateI, \ ScreenI, \ WellI, \ WellSampleI try: int except Exception: # Python 3 long = int class TestDeleteObject (object): def testDeleteAnnotation(self, author_testimg_generated): image = author_testimg_generated gateway = image._conn # create Tag on Image and try to delete Tag tag = omero.gateway.TagAnnotationWrapper(gateway) ns_tag = "omero.gateway.test.get_objects.test_delete_annotation_tag" tag.setNs(ns_tag) tag.setValue("Test Delete Tag") tag = image.linkAnnotation(tag) tagId = tag.getId() handle = gateway.deleteObjects("Annotation", [tagId]) gateway._waitOnCmd(handle) assert gateway.getObject("Annotation", tagId) is None def testDeleteImage(self, gatewaywrapper, author_testimg_generated): image = author_testimg_generated imageId = image.getId() project = gatewaywrapper.getTestProject() projectId = project.getId() ns = "omero.gateway.test.get_objects.test_delete_image_comment" ns_tag = "omero.gateway.test.get_objects.test_delete_image_tag" # create Comment ann = omero.gateway.CommentAnnotationWrapper(gatewaywrapper.gateway) ann.setNs(ns) ann.setValue("Test Comment") ann = image.linkAnnotation(ann) # create Tag tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway) tag.setNs(ns_tag) tag.setValue("Test Tag") tag = image.linkAnnotation(tag) # check the Comment assert gatewaywrapper.gateway.getObject( "Annotation", ann.id) is not None assert gatewaywrapper.gateway.getObject( "Annotation", tag.id) is not None # check Image, delete (wait) and check assert gatewaywrapper.gateway.getObject("Image", imageId) is not None handle = gatewaywrapper.gateway.deleteObjects("Image", [imageId]) gatewaywrapper.gateway._waitOnCmd(handle) assert gatewaywrapper.gateway.getObject("Image", imageId) is None # Comment should be deleted but not the Tag (becomes orphan) assert gatewaywrapper.gateway.getObject("Annotation", ann.id) is None assert gatewaywrapper.gateway.getObject( "Annotation", tag.id) is not None # Add the tag to project and delete (with Tags) assert gatewaywrapper.gateway.getObject( "Project", projectId) is not None project.linkAnnotation(tag) datasetIds = [d.getId() for d in project.listChildren()] assert len(datasetIds) > 0 handle = gatewaywrapper.gateway.deleteObjects( "Project", [projectId], deleteAnns=True, deleteChildren=True) gatewaywrapper.gateway._waitOnCmd(handle) assert gatewaywrapper.gateway.getObject("Project", projectId) is None assert gatewaywrapper.gateway.getObject("Annotation", tag.id) is None # Tag should be gone # check datasets gone too for dId in datasetIds: assert gatewaywrapper.gateway.getObject("Dataset", dId) is None class TestFindObject (object): def testIllegalObjTypeInt(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() with pytest.raises(AttributeError): gatewaywrapper.gateway.getObject(1, 
int(1)) def testObjTypeUnicode(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() a = gatewaywrapper.getTestProject() b = gatewaywrapper.gateway.getObject(u'Project', a.getId()) assert a.getId() == b.getId() def testObjTypeString(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() a = gatewaywrapper.getTestProject() b = gatewaywrapper.gateway.getObject('Project', a.getId()) assert a.getId() == b.getId() def testFindProject(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() project = gatewaywrapper.getTestProject() pName = project.getName() findProjects = list(gatewaywrapper.gateway.getObjects( "Project", None, attributes={"name": pName})) assert len(findProjects) > 0, "Did not find Project by name" for p in findProjects: assert p.getName() == pName, \ "All projects should have queried name" def testFindExperimenter(self, gatewaywrapper, author_testimg_tiny): omeName = author_testimg_tiny.getOwnerOmeName() group = author_testimg_tiny.getDetails().getGroup() groupName = group.getName() gatewaywrapper.loginAsAdmin() # findObjects findAuthor = list(gatewaywrapper.gateway.getObjects( "Experimenter", None, attributes={"omeName": omeName})) assert len(findAuthor) == 1, "Did not find Experimenter by omeName" assert findAuthor[0].omeName == omeName # findObject author = gatewaywrapper.gateway.getObject( "Experimenter", None, attributes={"omeName": omeName}) assert author is not None assert author.omeName == omeName # find group grp = gatewaywrapper.gateway.getObject( "ExperimenterGroup", None, attributes={"name": groupName}) assert grp is not None assert grp.getName() == groupName def testFindAnnotation(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() # start by deleting any tag created by this method that may have been # left behind tag_value = "FindThisTag" find_ns = "omero.gateway.test.test_find_annotations" find_tag = gatewaywrapper.gateway.getObjects( "Annotation", attributes={"textValue": tag_value, "ns": find_ns}) ids = [t._obj.id.val for t in find_tag] if ids: gatewaywrapper.gateway.deleteObjects("Annotation", ids, wait=True) # create Tag tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway) tag.setNs(find_ns) tag.setValue(tag_value) tag.save() tagId = tag.getId() # findObject by name find_tag = gatewaywrapper.gateway.getObject( "Annotation", attributes={"textValue": tag_value}) assert find_tag is not None assert find_tag.getValue() == tag_value # find by namespace find_tag = gatewaywrapper.gateway.getObject( "Annotation", attributes={"ns": find_ns}) assert find_tag is not None assert find_tag.getNs() == find_ns # find by text value find_tag = gatewaywrapper.gateway.getObject( "TagAnnotation", attributes={"textValue": tag_value}) assert find_tag is not None assert find_tag.getValue() == tag_value # create some other annotations... (not linked!) longAnn = omero.gateway.LongAnnotationWrapper(gatewaywrapper.gateway) longAnn.setValue(12345) longAnn.save() longId = longAnn.getId() boolAnn = omero.gateway.BooleanAnnotationWrapper( gatewaywrapper.gateway) boolAnn.setValue(True) boolAnn.save() boolId = boolAnn.getId() commAnn = omero.gateway.CommentAnnotationWrapper( gatewaywrapper.gateway) commAnn.setValue("This is a blitz gatewaytest Comment.") commAnn.save() commId = commAnn.getId() fileAnn = omero.gateway.FileAnnotationWrapper(gatewaywrapper.gateway) # An original file object needs to be linked to the annotation or it # will fail to be loaded on getObject(s). 
fileObj = omero.model.OriginalFileI() fileObj = omero.gateway.OriginalFileWrapper( gatewaywrapper.gateway, fileObj) fileObj.setName(omero.rtypes.rstring('a')) fileObj.setPath(omero.rtypes.rstring('a')) fileObj.setHash(omero.rtypes.rstring('a')) fileObj.setSize(omero.rtypes.rlong(0)) fileObj.save() fileAnn.setFile(fileObj) fileAnn.save() fileId = fileAnn.getId() doubleAnn = omero.gateway.DoubleAnnotationWrapper( gatewaywrapper.gateway) doubleAnn.setValue(1.23456) doubleAnn.save() doubleId = doubleAnn.getId() termAnn = omero.gateway.TermAnnotationWrapper(gatewaywrapper.gateway) termAnn.setValue("Metaphase") termAnn.save() termId = termAnn.getId() timeAnn = omero.gateway.TimestampAnnotationWrapper( gatewaywrapper.gateway) timeAnn.setValue(1000) timeAnn.save() timeId = timeAnn.getId() # list annotations of various types - check they include ones from # above tags = list(gatewaywrapper.gateway.getObjects("TagAnnotation")) for t in tags: assert t.OMERO_TYPE == tag.OMERO_TYPE assert tagId in [t.getId() for t in tags] longs = list(gatewaywrapper.gateway.getObjects("LongAnnotation")) for lng in longs: assert lng.OMERO_TYPE == longAnn.OMERO_TYPE assert longId in [lng.getId() for lng in longs] bools = list(gatewaywrapper.gateway.getObjects("BooleanAnnotation")) for b in bools: assert b.OMERO_TYPE == boolAnn.OMERO_TYPE assert boolId in [b.getId() for b in bools] comms = list(gatewaywrapper.gateway.getObjects("CommentAnnotation")) for c in comms: assert c.OMERO_TYPE == commAnn.OMERO_TYPE assert commId in [c.getId() for c in comms] files = list(gatewaywrapper.gateway.getObjects("FileAnnotation")) for f in files: assert f.OMERO_TYPE == fileAnn.OMERO_TYPE assert fileId in [f.getId() for f in files] doubles = list(gatewaywrapper.gateway.getObjects("DoubleAnnotation")) for d in doubles: assert d.OMERO_TYPE == doubleAnn.OMERO_TYPE assert doubleId in [d.getId() for d in doubles] terms = list(gatewaywrapper.gateway.getObjects("TermAnnotation")) for t in terms: assert t.OMERO_TYPE == termAnn.OMERO_TYPE assert termId in [t.getId() for t in terms] times = list(gatewaywrapper.gateway.getObjects("TimestampAnnotation")) for t in times: assert t.OMERO_TYPE == timeAnn.OMERO_TYPE assert timeId in [t.getId() for t in times] # delete what we created gatewaywrapper.gateway.deleteObjects( "Annotation", [longId, boolId, fileId, commId, tagId], wait=True) assert gatewaywrapper.gateway.getObject("Annotation", longId) is None assert gatewaywrapper.gateway.getObject("Annotation", boolId) is None assert gatewaywrapper.gateway.getObject("Annotation", fileId) is None assert gatewaywrapper.gateway.getObject("Annotation", commId) is None assert gatewaywrapper.gateway.getObject("Annotation", tagId) is None class TestGetObject (ITest): def testSearchObjects(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() # search for Projects pros = list(gatewaywrapper.gateway.searchObjects( ["Project"], "weblitz")) for p in pros: # assert p.getId() in projectIds assert p.OMERO_CLASS == "Project", "Should only return Projects" # P/D/I is default objects to search # pdis = list( gatewaywrapper.gateway.simpleSearch("weblitz") ) # # method removed from blitz gateway # pdis.sort(key=lambda r: "%s%s"%(r.OMERO_CLASS, r.getId()) ) pdiResult = list(gatewaywrapper.gateway.searchObjects( None, "weblitz")) pdiResult.sort(key=lambda r: "%s%s" % (r.OMERO_CLASS, r.getId())) # can directly check that sorted lists are the same # for r1, r2 in zip(pdis, pdiResult): # assert r1.OMERO_CLASS == r2.OMERO_CLASS # assert r1.getId() == r2.getId() def 
testListProjects(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() # params limit query by owner params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() conn = gatewaywrapper.gateway # should be no Projects owned by root (in the current group) params.theFilter.ownerId = omero.rtypes.rlong(0) # owned by 'root' pros = conn.getObjects("Project", None, params) assert len(list(pros)) == 0, "Should be no Projects owned by root" # Also filter by owner using opts dict pros = conn.getObjects("Project", None, opts={'owner': 0}) assert len(list(pros)) == 0, "Should be no Projects owned by root" # filter by current user should get same as above. # owned by 'author' params.theFilter.ownerId = omero.rtypes.rlong( conn.getEventContext().userId) pros = list(conn.getObjects( "Project", None, params)) projects = list(conn.listProjects()) # check unordered lists are the same length & ids assert len(pros) == len(projects) projectIds = [p.getId() for p in projects] for p in pros: assert p.getId() in projectIds def testPagination(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() params = omero.sys.ParametersI() # Only 3 images available limit = 2 params.page(0, limit) pros = list(gatewaywrapper.gateway.getObjects( "Project", None, params)) assert len(pros) == limit # Also using opts dict pros = list(gatewaywrapper.gateway.getObjects( "Project", None, opts={'offset': 0, 'limit': 2})) assert len(pros) == limit def testGetDatasetsByProject(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() allDs = list(gatewaywrapper.gateway.getObjects("Dataset")) # Get Datasets by project.listChildren()... project = gatewaywrapper.getTestProject() dsIds = [d.id for d in project.listChildren()] # Get Datasets, filtering by project p = {'project': project.id} datasets = list(gatewaywrapper.gateway.getObjects("Dataset", opts=p)) # Check that not all Datasets are in Project (or test is invalid) assert len(allDs) > len(dsIds) # Should get same result both methods assert len(datasets) == len(dsIds) for d in datasets: assert d.id in dsIds @pytest.mark.parametrize("load_gem", [True, False]) def testListExperimentersAndGroups(self, gatewaywrapper, load_gem): gatewaywrapper.loginAsAuthor() conn = gatewaywrapper.gateway # experimenters - load_experimentergroups True by default opts = {'limit': 10} if not load_gem: opts['load_experimentergroups'] = False exps = conn.getObjects("Experimenter", opts=opts) for e in exps: # check iQuery has loaded at least one group assert e._obj.groupExperimenterMapLoaded == load_gem e.copyGroupExperimenterMap() # groups. 
load_experimenters True by default opts = {'limit': 10} if not load_gem: opts['load_experimenters'] = False gps = conn.getObjects("ExperimenterGroup", opts=opts) for grp in gps: assert grp._obj.groupExperimenterMapLoaded == load_gem grp.copyGroupExperimenterMap() def testListColleagues(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() conn = gatewaywrapper.gateway # uses gateway.getObjects("ExperimenterGroup") - check this doesn't # throw colleagues = conn.listColleagues() for e in colleagues: e.getOmeName() def testFindExperimenterWithGroups(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() conn = gatewaywrapper.gateway # check we can find some groups exp = conn.getObject( "Experimenter", attributes={'omeName': gatewaywrapper.USER.name}) for groupExpMap in exp.copyGroupExperimenterMap(): gName = groupExpMap.parent.name.val gId = groupExpMap.parent.id.val findG = gatewaywrapper.gateway.getObject( "ExperimenterGroup", attributes={'name': gName}) assert gId == findG.id, "Check we found the same group" @pytest.mark.parametrize("load", [True, False]) def testGetExperimentersByGroup(self, gatewaywrapper, load): """ Filter Groups by Experimenters and vice versa. We test with and without loading experimenters/groups to check that the query is built correctly in both cases """ gatewaywrapper.loginAsAdmin() conn = gatewaywrapper.gateway # Two users in the same group... client, exp1 = self.new_client_and_user() grp1_id = client.sf.getAdminService().getEventContext().groupId exp2 = self.new_user(group=grp1_id) # Another group with one user grp2 = self.new_group(experimenters=[exp1]) # get Groups by Experimenters (in 1 or 2 groups + user group) groups = list(conn.getObjects("ExperimenterGroup", opts={ "experimenter": exp2.id.val, 'load_experimenters': load})) assert len(groups) == 2 assert grp1_id in [g.id for g in groups] groups = list(conn.getObjects("ExperimenterGroup", opts={ "experimenter": exp1.id.val, 'load_experimenters': load})) assert len(groups) == 3 # get Experimenters by Group (returns 1 or 2 exps) exps = list(conn.getObjects("Experimenter", opts={ "experimentergroup": grp2.id.val, "load_experimentergroups": load})) assert len(exps) == 1 assert exps[0].id == exp1.id.val exps = list(conn.getObjects("Experimenter", opts={ "experimentergroup": grp1_id, "load_experimentergroups": load})) assert len(exps) == 2 def testGetExperimenter(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() noExp = gatewaywrapper.gateway.getObject( "Experimenter", attributes={'omeName': "Dummy Fake Name"}) assert noExp is None, "Should not find any matching experimenter" findExp = gatewaywrapper.gateway.getObject( "Experimenter", attributes={'omeName': gatewaywrapper.USER.name}) exp = gatewaywrapper.gateway.getObject( "Experimenter", findExp.id) assert exp.omeName == findExp.omeName # check groupExperimenterMap loaded for exp groupIds = [] for groupExpMap in exp.copyGroupExperimenterMap(): assert findExp.id == groupExpMap.child.id.val groupIds.append(groupExpMap.parent.id.val) # for groupExpMap in experimenter.copyGroupExperimenterMap(): # assert findExp.id == groupExpMap.child.id.val groupGen = gatewaywrapper.gateway.getObjects( "ExperimenterGroup", groupIds, opts={'load_experimenters': True}) groups = list(groupGen) assert len(groups) == len(groupIds) for g in groups: assert g.getId() in groupIds for m in g.copyGroupExperimenterMap(): # check exps are loaded assert m.child def testGetAnnotations(self, gatewaywrapper, author_testimg_tiny): obj = author_testimg_tiny dataset = 
gatewaywrapper.getTestDataset() ns = "omero.gateway.test.get_objects.test_get_annotations_comment" ns_tag = "omero.gateway.test.get_objects.test_get_annotations_tag" # create Comment ann = omero.gateway.CommentAnnotationWrapper(gatewaywrapper.gateway) ann.setNs(ns) ann.setValue("Test Comment") ann = obj.linkAnnotation(ann) # create Tag tag = omero.gateway.TagAnnotationWrapper(gatewaywrapper.gateway) tag.setNs(ns_tag) tag.setValue("Test Tag") tag = obj.linkAnnotation(tag) dataset.linkAnnotation(tag) # get the Comment annotation = gatewaywrapper.gateway.getObject( "CommentAnnotation", ann.id) assert "Test Comment" == annotation.textValue assert ann.OMERO_TYPE == annotation.OMERO_TYPE # test getObject throws exception if more than 1 returned threw = True try: gatewaywrapper.gateway.getObject("Annotation") threw = False except Exception: threw = True assert threw, "getObject() didn't throw exception with >1 result" # get the Comment and Tag annGen = gatewaywrapper.gateway.getObjects( "Annotation", [ann.id, tag.id]) anns = list(annGen) assert len(anns) == 2 assert anns[0].ns in [ns, ns_tag] assert anns[1].ns in [ns, ns_tag] assert anns[0].OMERO_TYPE != anns[1].OMERO_TYPE # get all available annotation links on the image annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image") for al in annLinks: assert isinstance(al.getAnnotation(), omero.gateway.AnnotationWrapper) assert al.parent.__class__ == omero.model.ImageI # get selected links - On image only annLinks = gatewaywrapper.gateway.getAnnotationLinks( "Image", parent_ids=[obj.getId()]) for al in annLinks: assert obj.getId() == al.parent.id.val assert al.parent.__class__ == omero.model.ImageI # get selected links - On image only annLinks = gatewaywrapper.gateway.getAnnotationLinks( "Image", parent_ids=[obj.getId()]) for al in annLinks: assert obj.getId() == al.parent.id.val assert al.parent.__class__ == omero.model.ImageI # compare with getObjectsByAnnotations annImages = list(gatewaywrapper.gateway.getObjectsByAnnotations( 'Image', [tag.getId()])) assert obj.getId() in [i.getId() for i in annImages] # params limit query by owner params = omero.sys.Parameters() params.theFilter = omero.sys.Filter() # should be no links owned by root (in the current group) params.theFilter.ownerId = omero.rtypes.rlong(0) # owned by 'root' annLinks = gatewaywrapper.gateway.getAnnotationLinks( "Image", parent_ids=[obj.getId()], params=params) assert len(list(annLinks)) == 0, \ "No annotations on this image by root" # links owned by author eid = gatewaywrapper.gateway.getEventContext().userId params.theFilter.ownerId = omero.rtypes.rlong(eid) # owned by 'author' omeName = gatewaywrapper.gateway.getObject( "Experimenter", eid).getName() annLinks = gatewaywrapper.gateway.getAnnotationLinks( "Image", parent_ids=[obj.getId()], params=params) for al in annLinks: assert al.getOwnerOmeName() == omeName # all links on Image with specific ns annLinks = gatewaywrapper.gateway.getAnnotationLinks("Image", ns=ns) for al in annLinks: assert al.getAnnotation().ns == ns # get all uses of the Tag - have to check various types separately annList = list(gatewaywrapper.gateway.getAnnotationLinks( "Image", ann_ids=[tag.id])) assert len(annList) == 1 for al in annList: assert al.getAnnotation().id == tag.id annList = list(gatewaywrapper.gateway.getAnnotationLinks( "Dataset", ann_ids=[tag.id])) assert len(annList) == 1 for al in annList: assert al.getAnnotation().id == tag.id # remove annotations obj.removeAnnotations(ns) dataset.unlinkAnnotations(ns_tag) # unlink tag 
obj.removeAnnotations(ns_tag) # delete tag def testGetImage(self, gatewaywrapper, author_testimg_tiny): testImage = author_testimg_tiny # This should return image wrapper image = gatewaywrapper.gateway.getObject("Image", testImage.id) # test a few methods that involve lazy loading, rendering etc. assert image.getSizeZ() == testImage.getSizeZ() assert image.getSizeY() == testImage.getSizeY() image.isGreyscaleRenderingModel() # loads rendering engine testImage.isGreyscaleRenderingModel() assert image._re.getDefaultZ() == testImage._re.getDefaultZ() assert image._re.getDefaultT() == testImage._re.getDefaultT() assert image.getOwnerOmeName == testImage.getOwnerOmeName assert image.getThumbVersion() is not None @pytest.mark.parametrize("load_pixels", [True, False]) @pytest.mark.parametrize("load_channels", [True, False]) def testGetImageLoadPixels(self, load_pixels, load_channels, gatewaywrapper, author_testimg_tiny): testImage = author_testimg_tiny conn = gatewaywrapper.gateway # By default (no opts), don't load pixels image = conn.getObject("Image", testImage.id) assert not image._obj.isPixelsLoaded() # parametrized opts... opts = {'load_pixels': load_pixels, 'load_channels': load_channels} image = conn.getObject("Image", testImage.id, opts=opts) # pixels are also loaded if load_channels pix_loaded = load_pixels or load_channels assert image._obj.isPixelsLoaded() == pix_loaded if pix_loaded: pixels = image._obj._pixelsSeq[0] assert pixels.getPixelsType().isLoaded() if load_channels: assert pixels.isChannelsLoaded() for c in pixels.copyChannels(): lc = c.getLogicalChannel() assert lc.getPhotometricInterpretation().isLoaded() else: assert not pixels.isChannelsLoaded() def testGetProject(self, gatewaywrapper): gatewaywrapper.loginAsAuthor() testProj = gatewaywrapper.getTestProject() p = gatewaywrapper.gateway.getObject("Project", testProj.getId()) assert testProj.getName() == p.getName() assert testProj.getDescription() == p.getDescription() assert testProj.getId() == p.getId() assert testProj.OMERO_CLASS == p.OMERO_CLASS assert testProj.countChildren_cached() == p.countChildren_cached() assert testProj.getOwnerOmeName == p.getOwnerOmeName def testTraversal(self, author_testimg_tiny): image = author_testimg_tiny # This should return image wrapper pr = image.getProject() ds = image.getParent() assert image.listParents()[0] == image.getParent() assert ds == image.getParent(withlinks=True)[0] assert image.getParent(withlinks=True) == \ image.listParents(withlinks=True)[0] assert ds.getParent() == pr assert pr.getParent() is None assert len(pr.listParents()) == 0 @pytest.mark.parametrize("orphaned", [True, False]) @pytest.mark.parametrize("load_pixels", [False, False]) def testListOrphans(self, orphaned, load_pixels, gatewaywrapper): # We login as 'User', since they have no other orphaned images gatewaywrapper.loginAsUser() conn = gatewaywrapper.gateway eid = conn.getUserId() # Create 5 orphaned images iids = [] for i in range(0, 5): img = gatewaywrapper.createTestImage(imageName=str(uuid.uuid1())) iids.append(img.id) # Create image in Dataset, to check this isn't found dataset = DatasetI() dataset.name = wrap('testListOrphans') image = ImageI() image.name = wrap('testListOrphans') dataset.linkImage(image) dataset = conn.getUpdateService().saveAndReturnObject(dataset) try: # Only test listOrphans() if orphaned if orphaned: # Pagination params = omero.sys.ParametersI() params.page(1, 3) findImagesInPage = list(conn.listOrphans("Image", eid=eid, params=params)) assert len(findImagesInPage) == 3 # No 
pagination (all orphans) findImages = list(conn.listOrphans("Image", loadPixels=load_pixels)) assert len(findImages) == 5 for p in findImages: assert p._obj.pixelsLoaded == load_pixels # Test getObjects() with 'orphaned' option opts = {'orphaned': orphaned, 'load_pixels': load_pixels} getImages = list(conn.getObjects("Image", opts=opts)) assert orphaned == (len(getImages) == 5) for p in getImages: assert p._obj.pixelsLoaded == load_pixels # Simply check this doesn't fail See https://github.com/ # openmicroscopy/openmicroscopy/pull/4950#issuecomment-264142956 dsIds = [d.id for d in conn.listOrphans("Dataset")] assert dataset.id.val in dsIds finally: # Cleanup - Delete what we created conn.deleteObjects('Image', iids, deleteAnns=True, wait=True) conn.deleteObjects('Dataset', [dataset.id.val], deleteChildren=True, wait=True) def testOrderById(self, gatewaywrapper): gatewaywrapper.loginAsUser() imageIds = list() for i in range(0, 3): iid = gatewaywrapper.createTestImage( "%s-testOrderById" % i).getId() imageIds.append(iid) images = gatewaywrapper.gateway.getObjects( "Image", imageIds, respect_order=True) resultIds = [i.id for i in images] assert imageIds == resultIds, "Images not ordered by ID" imageIds.reverse() reverseImages = gatewaywrapper.gateway.getObjects( "Image", imageIds, respect_order=True) reverseIds = [i.id for i in reverseImages] assert imageIds == reverseIds, "Images not ordered by ID" wrappedIds = [rlong(i) for i in imageIds] reverseImages = gatewaywrapper.gateway.getObjects( "Image", wrappedIds, respect_order=True) reverseIds = [i.id for i in reverseImages] assert imageIds == reverseIds, "fails when IDs is list of rlongs" invalidIds = imageIds[:] invalidIds[1] = 0 reverseImages = gatewaywrapper.gateway.getObjects( "Image", invalidIds, respect_order=True) reverseIds = [i.id for i in reverseImages] assert len(imageIds) - 1 == len(reverseIds), \ "One image not found by ID: 0" # Delete to clean up handle = gatewaywrapper.gateway.deleteObjects( 'Image', imageIds, deleteAnns=True) try: gatewaywrapper.gateway._waitOnCmd(handle) finally: handle.close() @pytest.mark.parametrize("datatype", ['Image', 'Dataset', 'Project', 'Screen', 'Plate']) def testGetObjectsByMapAnnotations(self, datatype): client, exp = self.new_client_and_user() conn = BlitzGateway(client_obj=client) def createTarget(datatype, name, key="", value="", ns=None): """ Creates an object and attaches a map annotation to it """ if datatype == "Image": tgt = ImageWrapper(conn, omero.model.ImageI()) tgt.setName(name) tgt.save() if datatype == "Dataset": tgt = DatasetWrapper(conn, omero.model.DatasetI()) tgt.setName(name) tgt.save() if datatype == "Project": tgt = ProjectWrapper(conn, omero.model.ProjectI()) tgt.setName(name) tgt.save() if datatype == "Screen": tgt = ScreenWrapper(conn, omero.model.ScreenI()) tgt.setName(name) tgt.save() if datatype == "Plate": tgt = PlateWrapper(conn, omero.model.PlateI()) tgt.setName(name) tgt.save() for _ in range(0, 2): # Add two map annotations to check that each object # is still just returned once. 
map_ann = omero.gateway.MapAnnotationWrapper(conn) map_ann.setValue([(key, value)]) if ns: map_ann.setNs(ns) map_ann.save() tgt.linkAnnotation(map_ann) return tgt name = str(uuid.uuid4()) key = str(uuid.uuid4()) value = str(uuid.uuid4()) ns = str(uuid.uuid4()) kv = createTarget(datatype, name, key=key, value=value) v = createTarget(datatype, name, key=str(uuid.uuid4()), value=value) k = createTarget(datatype, name, key=key, value=str(uuid.uuid4())) kvn = createTarget(datatype, name, key=key, value=value, ns=ns) n = createTarget(datatype, name, key=str(uuid.uuid4()), value=str(uuid.uuid4()), ns=ns) # 3x key matches, 3x value matches, 2x key+value matches, # 2x ns matches, 1x key+value+ns matches # No match results = list(conn.getObjectsByMapAnnotations(datatype, key=str(uuid.uuid4()))) assert len(results) == 0 # Key match results = list(conn.getObjectsByMapAnnotations(datatype, key=key)) assert len(results) == 3 ids = [r.getId() for r in results] assert k.getId() in ids assert kv.getId() in ids assert kvn.getId() in ids # Key wildcard match wc = "*"+key[2:12]+"*" results = list(conn.getObjectsByMapAnnotations(datatype, key=wc)) assert len(results) == 3 ids = [r.getId() for r in results] assert k.getId() in ids assert kv.getId() in ids assert kvn.getId() in ids # Value match results = list(conn.getObjectsByMapAnnotations(datatype, value=value)) assert len(results) == 3 ids = [r.getId() for r in results] assert v.getId() in ids assert kv.getId() in ids assert kvn.getId() in ids # Key+Value match results = list(conn.getObjectsByMapAnnotations(datatype, key=key, value=value)) assert len(results) == 2 ids = [r.getId() for r in results] assert kv.getId() in ids assert kvn.getId() in ids # Key+Value wildcard match wc = "*"+value[2:12]+"*" results = list(conn.getObjectsByMapAnnotations(datatype, key=key, value=wc)) assert len(results) == 2 ids = [r.getId() for r in results] assert kv.getId() in ids assert kvn.getId() in ids # Key+Value wildcard doesn't match wc = value[2:12]+"*" results = list(conn.getObjectsByMapAnnotations(datatype, key=key, value=wc)) assert len(results) == 0 # NS match results = list(conn.getObjectsByMapAnnotations(datatype, ns=ns)) assert len(results) == 2 ids = [r.getId() for r in results] assert n.getId() in ids assert kvn.getId() in ids # Key+Value+NS match results = list(conn.getObjectsByMapAnnotations(datatype, key=key, value=value, ns=ns)) assert len(results) == 1 assert kvn.getId() == results[0].getId() # Test limit results = list(conn.getObjectsByMapAnnotations(datatype)) assert len(results) == 5 results = list(conn.getObjectsByMapAnnotations(datatype, opts={"limit": 4})) assert len(results) == 4 class TestLeaderAndMemberOfGroup(object): @pytest.fixture(autouse=True) def setUp(self): """ Create a group with owner & member""" dbhelpers.USERS['group_owner'] = dbhelpers.UserEntry( 'group_owner', 'ome', firstname='Group', lastname='Owner', groupname="ownership_test", groupperms='rwr---', groupowner=True) dbhelpers.USERS['group_member'] = dbhelpers.UserEntry( 'group_member', 'ome', firstname='Group', lastname='Member', groupname="ownership_test", groupperms='rwr---', groupowner=False) dbhelpers.bootstrap(onlyUsers=True) def testGetGroupsLeaderOfAsLeader(self, gatewaywrapper): gatewaywrapper.doLogin(dbhelpers.USERS['group_owner']) assert gatewaywrapper.gateway.isLeader() grs = [g.id for g in gatewaywrapper.gateway.getGroupsLeaderOf()] assert len(grs) > 0 exp = gatewaywrapper.gateway.getObject( "Experimenter", attributes={'omeName': 'group_owner'}) assert 
exp.sizeOfGroupExperimenterMap() > 0 filter_system_groups = [gatewaywrapper.gateway.getAdminService() .getSecurityRoles().userGroupId] leaderOf = list() for groupExpMap in exp.copyGroupExperimenterMap(): gId = groupExpMap.parent.id.val if groupExpMap.owner.val and gId not in filter_system_groups: leaderOf.append(gId) assert(leaderOf == grs) def testGetGroupsLeaderOfAsMember(self, gatewaywrapper): gatewaywrapper.doLogin(dbhelpers.USERS['group_member']) assert not gatewaywrapper.gateway.isLeader() with pytest.raises(StopIteration): next(gatewaywrapper.gateway.getGroupsLeaderOf()) def testGetGroupsMemberOf(self, gatewaywrapper): gatewaywrapper.doLogin(dbhelpers.USERS['group_member']) assert not gatewaywrapper.gateway.isLeader() grs = [g.id for g in gatewaywrapper.gateway.getGroupsMemberOf()] assert len(grs) > 0 exp = gatewaywrapper.gateway.getObject( "Experimenter", attributes={'omeName': "group_member"}) assert exp.sizeOfGroupExperimenterMap() > 0 filter_system_groups = [gatewaywrapper.gateway.getAdminService() .getSecurityRoles().userGroupId] memberOf = list() for groupExpMap in exp.copyGroupExperimenterMap(): gId = groupExpMap.parent.id.val if not groupExpMap.owner.val and gId not in filter_system_groups: memberOf.append(gId) assert memberOf == grs def testGroupSummaryAsOwner(self, gatewaywrapper): """Test groupSummary() when Group loaded without experimenters.""" gatewaywrapper.doLogin(dbhelpers.USERS['group_owner']) expGr = gatewaywrapper.gateway.getObject( "ExperimenterGroup", attributes={'name': 'ownership_test'}) leaders, colleagues = expGr.groupSummary() assert len(leaders) == 1 assert len(colleagues) == 1 assert leaders[0].omeName == "group_owner" assert colleagues[0].omeName == "group_member" leaders, colleagues = expGr.groupSummary(exclude_self=True) assert len(leaders) == 0 assert len(colleagues) == 1 assert colleagues[0].omeName == "group_member" def testGroupSummaryAsMember(self, gatewaywrapper): gatewaywrapper.doLogin(dbhelpers.USERS['group_member']) expGr = gatewaywrapper.gateway.getObject( "ExperimenterGroup", attributes={'name': 'ownership_test'}) leaders, colleagues = expGr.groupSummary() assert len(leaders) == 1 assert len(colleagues) == 1 assert leaders[0].omeName == "group_owner" assert colleagues[0].omeName == "group_member" leaders, colleagues = expGr.groupSummary(exclude_self=True) assert len(leaders) == 1 assert leaders[0].omeName == "group_owner" assert len(colleagues) == 0 def testGroupSummaryAsOwnerDeprecated(self, gatewaywrapper): gatewaywrapper.doLogin(dbhelpers.USERS['group_owner']) summary = gatewaywrapper.gateway.groupSummary() assert len(summary["leaders"]) == 1 assert len(summary["colleagues"]) == 1 assert summary["leaders"][0].omeName == "group_owner" assert summary["colleagues"][0].omeName == "group_member" summary = gatewaywrapper.gateway.groupSummary(exclude_self=True) assert len(summary["leaders"]) == 0 assert len(summary["colleagues"]) == 1 assert summary["colleagues"][0].omeName == "group_member" def testGroupSummaryAsMemberDeprecated(self, gatewaywrapper): gatewaywrapper.doLogin(dbhelpers.USERS['group_member']) summary = gatewaywrapper.gateway.groupSummary() assert len(summary["leaders"]) == 1 assert len(summary["colleagues"]) == 1 assert summary["leaders"][0].omeName == "group_owner" assert summary["colleagues"][0].omeName == "group_member" summary = gatewaywrapper.gateway.groupSummary(exclude_self=True) assert len(summary["leaders"]) == 1 assert summary["leaders"][0].omeName == "group_owner" assert len(summary["colleagues"]) == 0 class 
TestListParents(ITest): def testSupportedObjects(self): """ Check that we are testing all objects where listParents() is supported. If this test fails, need to update tested_wrappers and add corresponding tests below """ tested_wrappers = ['plate', 'image', 'dataset', 'experimenter', 'well'] for key, wrapper in list(KNOWN_WRAPPERS.items()): if (hasattr(wrapper, 'PARENT_WRAPPER_CLASS') and wrapper.PARENT_WRAPPER_CLASS is not None): assert key in tested_wrappers def testListParentsPDI(self): """Test listParents() for Image in Dataset""" # Set up PDI client, exp = self.new_client_and_user() p = self.make_project(name="ListParents Test", client=client) d = self.make_dataset(name="ListParents Test", client=client) i = self.make_image(name="ListParents Test", client=client) self.link(p, d, client=client) self.link(d, i, client=client) conn = BlitzGateway(client_obj=client) image = conn.getObject("Image", i.id.val) # Traverse from Image -> Project dataset = image.listParents()[0] assert dataset.id == d.id.val project = dataset.listParents()[0] assert project.id == p.id.val # Project has no parent assert len(project.listParents()) == 0 def testListParentsSPW(self): """Test listParents() for Image in WellSample""" client, exp = self.new_client_and_user() conn = BlitzGateway(client_obj=client) # setup SPW-WS-Img... s = ScreenI() s.name = wrap('ScreenA') p = PlateI() p.name = wrap('PlateA') s.linkPlate(p) w = WellI() w.column = wrap(0) w.row = wrap(0) p.addWell(w) s = client.sf.getUpdateService().saveAndReturnObject(s) p = s.linkedPlateList()[0] w = p.copyWells()[0] i = self.make_image(name="SPW listParents", client=client) ws = WellSampleI() ws.image = i ws.well = WellI(w.id.val, False) w.addWellSample(ws) ws = client.sf.getUpdateService().saveAndReturnObject(ws) # Traverse from Image -> Screen image = conn.getObject("Image", i.id.val) wellSample = image.listParents()[0] well = wellSample.listParents()[0] assert well.id == w.id.val plate = well.listParents()[0] assert plate.id == p.id.val screen = plate.listParents()[0] assert screen.id == s.id.val # Screen has no parent assert len(screen.listParents()) == 0 def testExperimenterListParents(self): """Test listParents() for Experimenter in ExperimenterGroup.""" client, exp = self.new_client_and_user() conn = BlitzGateway(client_obj=client) userGroupId = conn.getAdminService().getSecurityRoles().userGroupId exp = conn.getUser() groups = exp.listParents() assert len(groups) == 2 gIds = [g.id for g in groups] assert userGroupId in gIds # ExperimenterGroup has no parent assert len(groups[0].listParents()) == 0
gpl-2.0
-8,658,934,620,634,377,000
39.918018
79
0.615381
false
WA4OSH/Learn_Python
oldLady.py
1
2383
#-------------------------------------------------------------------------------
# Name:        oldLady.py
# Purpose:     Demo of program control, loops, branches, etc.
#
# Author:      Konrad Roeder, adapted from the nursery rhyme
#              There was an Old Lady song from the
#              Secret History of Nursery Rhymes Book
#              www.rhymes.uk/there_was_an_old_lady.htm
#
# Created:     04/16/2014
# Copyright:   (cc) Konrad Roeder 2014
# Licence:     CC by 4.0
#-------------------------------------------------------------------------------

#There are seven animals in this song, one for each verse
animalName = ['fly','spider','bird','cat','dog','cow','horse']

#Each verse in the song starts with this section, printing this line
def printSectionA(verse):
    print("There was an old lady who swallowed a",animalName[verse-1])

#In section B, the line is different for each verse
def printSectionB(verse):
    #if (verse == 1): Do nothing
    if (verse == 2):
        print("That wriggled and wiggled and tickled inside her")
    elif (verse == 3):
        print("How absurd to swallow a bird")
    elif (verse == 4):
        print("Fancy that to swallow a cat")
    elif (verse == 5):
        print("What a hog to swallow a dog")
    elif (verse == 6):
        print("I don't know how she swallowed a cow")
    elif (verse == 7):
        print("She's dead, of course!")

def printSectionC(verse):
    #This section only has lines in the middle five verses
    if (verse < 7):
        #The for loop drops through on the first verse
        #In verses 2-6, it prints one line less than the verse number
        for line in range(verse-1, 0, -1):
            print("She swallowed the",animalName[line],
                  "to catch the", animalName[line-1])

def printSectionD(verse):
    #This section exists only in the first six verses
    if (verse < 7):
        print("I don't know why she swallowed a fly - Perhaps she will die!")
        print("")

def song():
    #Print the title
    print("There was an Old Lady song")
    print("")
    #Print each of the seven verses
    for verse in range(1,8):
        #Each verse has four sections
        printSectionA(verse)
        printSectionB(verse)
        printSectionC(verse)
        printSectionD(verse)
    #Print the song's coda (ending)
    print("")
    print("There was an Old Lady song")

song()
cc0-1.0
2,651,610,528,392,757,000
33.536232
80
0.579522
false
linsalrob/EdwardsLab
patric/parse_gto.py
1
2770
""" Parse a GTO object """ import os import sys import argparse from roblib import bcolors import json def list_keys(gto, verbose=False): """ List the primary keys in the patric file :param gto: the json gto :param verbose: more output :return: """ print("{}".format("\n".join(gto.keys()))) def dump_json(gto, k, verbose=False): """ Print out the json representation of some data :param gto: the json gto :param k: the key to dump (none for everything) :param verbose: more output :return: """ if k: if k in gto: print(json.dumps(gto[k], indent=4)) else: sys.stderr.write(f"{bcolors.RED}ERROR: {k} not found.{bcolors.ENDC}\n") else: print(json.dumps(gto, indent=4)) def feature_tbl(gto, verbose=False): """ Print a tab separated feature table :param gto: the json gto :param verbose: more output :return: """ for peg in gto['features']: if 'location' not in peg: sys.stderr.write(f"{bcolors.RED}Error: no location found\n{bcolors.PINK}{peg}{bcolors.ENDC}\n") continue locs = [] for l in peg['location']: start = int(l[1]) if l[2] == '+': stop = (start + int(l[3])) - 1 elif l[2] == '-': start = (start - int(l[3])) + 1 stop = int(l[1]) else: sys.stderr.write(f"{bcolors.RED}Error: Don't know location l[2]\n{bcolors.ENDC}") continue locs.append(f"{l[0]} {start} - {stop} ({l[2]})") data = [ peg['id'], peg['function'], "; ".join(locs) ] print("\t".join(data)) if __name__ == '__main__': parser = argparse.ArgumentParser(description="Plot a heatmap") parser.add_argument('-f', help='gto file', required=True) parser.add_argument('-l', help='list the primary keys and exit', action='store_true') parser.add_argument('-d', help='dump some part of the json object', action='store_true') parser.add_argument('-p', help='print protein feature table', action='store_true') parser.add_argument('-k', help='json primary key (e.g. for dumping, etc)') parser.add_argument('-o', help='output file') parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() gto = json.load(open(args.f, 'r')) if args.l: list_keys(gto, args.v) sys.exit(0) if args.d: dump_json(gto, args.k, args.v) sys.exit(0) if args.p: feature_tbl(gto, args.v) sys.exit(0) sys.stderr.write(f"{bcolors.RED}ERROR: You did not specify a command to run{bcolors.ENDC}\n")
mit
-2,918,537,910,248,930,000
27.556701
107
0.558123
false
lcpt/xc
verif/tests/elements/spring_test_01.py
1
2830
# -*- coding: utf-8 -*- # home made test __author__= "Luis C. Pérez Tato (LCPT) and Ana Ortega (AOO)" __copyright__= "Copyright 2015, LCPT and AOO" __license__= "GPL" __version__= "3.0" __email__= "[email protected]" K= 1000 # Spring constant l= 100 # Distance between nodes F= 1 # Force magnitude import xc_base import geom import xc from solution import predefined_solutions from model import predefined_spaces from materials import typical_materials # Model definition feProblem= xc.FEProblem() preprocessor= feProblem.getPreprocessor nodes= preprocessor.getNodeHandler # Problem type modelSpace= predefined_spaces.SolidMechanics2D(nodes) nodes.defaultTag= 1 #First node number. nod= nodes.newNodeXY(0,0) nod= nodes.newNodeXY(l,0.0) # Materials definition elast= typical_materials.defElasticMaterial(preprocessor, "elast",K) ''' We define nodes at the points where loads will be applied. We will not compute stresses so we can use an arbitrary cross section of unit area.''' # Elements definition elements= preprocessor.getElementHandler elements.defaultMaterial= "elast" elements.dimElem= 2 # Dimension of element space elements.defaultTag= 1 # sintaxis: Spring[<tag>] spring= elements.newElement("Spring",xc.ID([1,2])) # Constraints constraints= preprocessor.getBoundaryCondHandler # spc= constraints.newSPConstraint(1,0,0.0) # Node 1 spc= constraints.newSPConstraint(1,1,0.0) spc= constraints.newSPConstraint(2,1,0.0) # Node 2 # Loads definition loadHandler= preprocessor.getLoadHandler lPatterns= loadHandler.getLoadPatterns #Load modulation. ts= lPatterns.newTimeSeries("constant_ts","ts") lPatterns.currentTimeSeries= "ts" lPattern= "0" lp0= lPatterns.newLoadPattern("default",lPattern) #lPatterns.currentLoadPattern= lPattern # we check that loads are cummulated by the way. lp0.newNodalLoad(2,xc.Vector([F/2.0,0])) lp0.newNodalLoad(2,xc.Vector([F/2.0,0])) lPatterns.addToDomain(lPattern) # Append load pattern to domain. # Solution analisis= predefined_solutions.simple_static_linear(feProblem) result= analisis.analyze(1) nodes.calculateNodalReactions(True,1e-7) nod2= nodes.getNode(2) deltax= nod2.getDisp[0] deltay= nod2.getDisp[1] nod1= nodes.getNode(1) R= nod1.getReaction[0] elements= preprocessor.getElementHandler elem1= elements.getElement(1) elem1.getResistingForce() Ax= elem1.getMaterial().getStrain() # Spring elongation ratio1= (F+R/F) ratio2= ((K*deltax-F)/F) ratio3= ((deltax-Ax)/Ax) ''' print "R= ",R print "dx= ",deltax print "dy= ",deltay print "Ax= ",Ax print "ratio1= ",ratio1 print "ratio2= ",ratio2 print "ratio3= ",ratio3 ''' import os from miscUtils import LogMessages as lmsg fname= os.path.basename(__file__) if (abs(ratio1)<1e-5) & (abs(ratio2)<1e-5) & (abs(ratio3)<1e-5): print "test ",fname,": ok." else: lmsg.error(fname+' ERROR.')
gpl-3.0
-5,412,932,095,904,627,000
25.439252
68
0.7462
false
BenKettlewell/Livestreamer-Command-Line-Generator
livestreamerCLG.py
1
2267
''' Parses crunchyroll URLs and provides a string command line argument to download them. Utilizing youtube-dl to split sub and video files but livestreamer functionality can be added with minimal effort -h, --help Output this help document -u, --url Provide a single url -f, --file Provide location of csv file File format (do not include headers) crunchyroll_url,subtitle_url,season# #subtitle_url not implemented yet -c, Use cookie file located at $COOKIES instead of password auth --cookie-auth ''' from urlparse import urlparse import sys # Command Line Arguments import getopt # Parse CLI Args import re # Regular Expressions from CrunchyCSV import CrunchyCSV from Anime import Anime from crunchyroll import outputer from shell import downloader def main (argv): ''' This program has 3 distinct stages. 1. Request a set of urls from the user and store them 2. Parse and formulate the compiled Livestreamer command 3. Return the string to the user ''' urls = '' file_csv = '' auth_method = 'password' # parse command line options try: opts, args = getopt.getopt(argv, "hu:f:c", ["help","url=","file=","cookie-auth"]) except getopt.error, msg: print msg print "for help use --help" sys.exit(2) # process options for o, a in opts: if o in ("-h", "--help"): print __doc__ sys.exit(1) if o in ("-u", "--url"): urls = a print'urls are :', a if o in ("-f", "--file"): file_csv = a print'csv_file :', a if o in ("-c","--cookie-auth"): auth_method = 'cookies' print'using cookies' # process arguments for arg in args: process(arg) # process() is defined elsewhere if file_csv != '': crunchyCSV = CrunchyCSV(file_csv) print outputer.youtube_dl_string_for_CrunchyCSV(crunchyCSV, auth_method) print outputer.list_of_anime_filenames(crunchyCSV) else: anime = Anime(urls, '', '') print outputer.youtube_dl_string_for_Anime(anime, auth_method) print downloader.sub_call() if __name__ == "__main__": main(sys.argv[1:])
gpl-2.0
-2,636,595,250,218,402,000
31.385714
89
0.613586
false
root-mirror/root
tutorials/roofit/rf604_constraints.py
11
2705
## \file ## \ingroup tutorial_roofit ## \notebook -nodraw ## Likelihood and minimization: fitting with constraints ## ## \macro_code ## ## \date February 2018 ## \authors Clemens Lange, Wouter Verkerke (C++ version) from __future__ import print_function import ROOT # Create model and dataset # ---------------------------------------------- # Construct a Gaussian pdf x = ROOT.RooRealVar("x", "x", -10, 10) m = ROOT.RooRealVar("m", "m", 0, -10, 10) s = ROOT.RooRealVar("s", "s", 2, 0.1, 10) gauss = ROOT.RooGaussian("gauss", "gauss(x,m,s)", x, m, s) # Construct a flat pdf (polynomial of 0th order) poly = ROOT.RooPolynomial("poly", "poly(x)", x) # model = f*gauss + (1-f)*poly f = ROOT.RooRealVar("f", "f", 0.5, 0., 1.) model = ROOT.RooAddPdf( "model", "model", ROOT.RooArgList( gauss, poly), ROOT.RooArgList(f)) # Generate small dataset for use in fitting below d = model.generate(ROOT.RooArgSet(x), 50) # Create constraint pdf # ----------------------------------------- # Construct Gaussian constraint pdf on parameter f at 0.8 with # resolution of 0.1 fconstraint = ROOT.RooGaussian( "fconstraint", "fconstraint", f, ROOT.RooFit.RooConst(0.8), ROOT.RooFit.RooConst(0.1)) # Method 1 - add internal constraint to model # ------------------------------------------------------------------------------------- # Multiply constraint term with regular pdf using ROOT.RooProdPdf # Specify in fitTo() that internal constraints on parameter f should be # used # Multiply constraint with pdf modelc = ROOT.RooProdPdf( "modelc", "model with constraint", ROOT.RooArgList(model, fconstraint)) # Fit model (without use of constraint term) r1 = model.fitTo(d, ROOT.RooFit.Save()) # Fit modelc with constraint term on parameter f r2 = modelc.fitTo( d, ROOT.RooFit.Constrain( ROOT.RooArgSet(f)), ROOT.RooFit.Save()) # Method 2 - specify external constraint when fitting # ------------------------------------------------------------------------------------------ # Construct another Gaussian constraint pdf on parameter f at 0.8 with # resolution of 0.1 fconstext = ROOT.RooGaussian("fconstext", "fconstext", f, ROOT.RooFit.RooConst( 0.2), ROOT.RooFit.RooConst(0.1)) # Fit with external constraint r3 = model.fitTo(d, ROOT.RooFit.ExternalConstraints( ROOT.RooArgSet(fconstext)), ROOT.RooFit.Save()) # Print the fit results print("fit result without constraint (data generated at f=0.5)") r1.Print("v") print("fit result with internal constraint (data generated at f=0.5, is f=0.8+/-0.2)") r2.Print("v") print("fit result with (another) external constraint (data generated at f=0.5, is f=0.2+/-0.1)") r3.Print("v")
lgpl-2.1
5,271,455,174,840,602,000
28.402174
96
0.621072
false
mgagne/nova
nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py
1
6260
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack.compute.contrib import quota_classes from nova.api.openstack.compute import plugins from nova.api.openstack.compute.plugins.v3 import quota_classes \ as quota_classes_v21 from nova.api.openstack import extensions from nova import test from nova.tests.unit.api.openstack import fakes def quota_set(class_name): return {'quota_class_set': {'id': class_name, 'metadata_items': 128, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'instances': 10, 'injected_files': 5, 'cores': 20, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, 'injected_file_path_bytes': 255}} class QuotaClassSetsTestV21(test.TestCase): def setUp(self): super(QuotaClassSetsTestV21, self).setUp() self.req_admin = fakes.HTTPRequest.blank('', use_admin_context=True) self.req = fakes.HTTPRequest.blank('') self._setup() def _setup(self): ext_info = plugins.LoadedExtensionInfo() self.controller = quota_classes_v21.QuotaClassSetsController( extension_info=ext_info) def test_format_quota_set(self): raw_quota_set = { 'instances': 10, 'cores': 20, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_path_bytes': 255, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, } quota_set = self.controller._format_quota_set('test_class', raw_quota_set) qs = quota_set['quota_class_set'] self.assertEqual(qs['id'], 'test_class') self.assertEqual(qs['instances'], 10) self.assertEqual(qs['cores'], 20) self.assertEqual(qs['ram'], 51200) self.assertEqual(qs['floating_ips'], 10) self.assertEqual(qs['fixed_ips'], -1) self.assertEqual(qs['metadata_items'], 128) self.assertEqual(qs['injected_files'], 5) self.assertEqual(qs['injected_file_path_bytes'], 255) self.assertEqual(qs['injected_file_content_bytes'], 10240) self.assertEqual(qs['security_groups'], 10) self.assertEqual(qs['security_group_rules'], 20) self.assertEqual(qs['key_pairs'], 100) def test_quotas_show_as_admin(self): res_dict = self.controller.show(self.req_admin, 'test_class') self.assertEqual(res_dict, quota_set('test_class')) def test_quotas_show_as_unauthorized_user(self): self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, self.req, 'test_class') def test_quotas_update_as_admin(self): body = {'quota_class_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} res_dict = self.controller.update(self.req_admin, 'test_class', body) self.assertEqual(res_dict, body) def test_quotas_update_as_user(self): body = {'quota_class_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 
'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, }} self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, self.req, 'test_class', body) def test_quotas_update_with_empty_body(self): body = {} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req_admin, 'test_class', body) def test_quotas_update_with_non_integer(self): body = {'quota_class_set': {'instances': "abc"}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req_admin, 'test_class', body) body = {'quota_class_set': {'instances': 50.5}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req_admin, 'test_class', body) body = {'quota_class_set': { 'instances': u'\u30aa\u30fc\u30d7\u30f3'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req_admin, 'test_class', body) class QuotaClassSetsTestV2(QuotaClassSetsTestV21): def _setup(self): ext_mgr = extensions.ExtensionManager() ext_mgr.extensions = {} self.controller = quota_classes.QuotaClassSetsController(ext_mgr)
apache-2.0
2,523,223,996,149,545,500
41.297297
78
0.548722
false
szrlee/AIPS
RouteApp.py
1
1995
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
#
"""def get_tree_graph():
    #return graph
    pass
According to POX wiki, method spanning_tree returns it as dictionary like
{s1:([(s2,port1),(s3,port2),...]),s2:([(s1,port),...]),...}
#port refers to the port of s1 which connects to s2
A graph example for path searching.
graph = {'A': ['B', 'C'],'B': ['C', 'D'], 'C': ['D'], 'D': ['C'], 'E': ['F'], 'F': ['C']}"""

def find_shortest_route(graph, start, end, path=[]):
    #find the shortest path from start to end, including ports INFO from one node to another
    path = path + [start]
    if start == end:
        return path
    if not graph.has_key(start):
        return None
    shortest = []
    #Analyze the contents,([('h1', 'port11'), ('h2', 'port12'), ('s2', 'port13')])
    start_list = graph[start]
    for item in start_list:
        if item[0] not in path:
            newpath = find_shortest_route(graph, item[0], end, path)
            if newpath:
                if not shortest or len(newpath) < len(shortest):
                    shortest = newpath
    return shortest

def get_shortest_route(graph, start, end):
    path = []
    route = find_shortest_route(graph, start, end)
    #add the port info from one node to another
    for index in range(0, len(route)-1):
        for item in graph[route[index]]:
            #Contents ([('h1', 'port11'), ('h2', 'port12'), ('s2', 'port13')])
            if item[0] == route[index+1]:
                path = path + [(route[index], item[1])]
                break
    path += [route[-1]]
    return path

if __name__ == '__main__':
    graph = {'h1': ([('s1', 'porth1_s1')]),
             'h2': ([('s3', 'porth2_s3')]),
             's1': ([('h1', 'ports1_h1'), ('s2', 'ports1_s2')]),
             's2': ([('s1', 'ports2_s1'), ('s3', 'ports2_s3')]),
             's3': ([('s2', 'ports3_s2'), ('h2', 'ports3_h2')])
             }
    print get_shortest_route(graph, 'h1', 'h2')
apache-2.0
1,809,886,606,186,799,000
29.227273
92
0.502757
false
valdur55/py3status
py3status/modules/keyboard_locks.py
1
2349
r""" Display NumLock, CapsLock, and ScrLock keys. Configuration parameters: cache_timeout: refresh interval for this module (default 1) format: display format for this module *(default '[\?if=num_lock&color=good NUM|\?color=bad NUM] ' '[\?if=caps_lock&color=good CAPS|\?color=bad CAPS] ' '[\?if=scroll_lock&color=good SCR|\?color=bad SCR]')* Control placeholders: {num_lock} a boolean based on xset data {caps_lock} a boolean based on xset data {scroll_lock} a boolean based on xset data Color options: color_good: Lock on color_bad: Lock off Examples: ``` # hide NUM, CAPS, SCR keyboard_locks { format = '\?color=good [\?if=num_lock NUM][\?soft ]' format += '[\?if=caps_lock CAPS][\?soft ][\?if=scroll_lock SCR]' } ``` @author lasers SAMPLE OUTPUT {'color': '#00FF00', 'full_text': 'NUM CAPS SCR'} no_locks {'color': '#FF0000', 'full_text': 'NUM CAPS SCR'} """ class Py3status: """ """ # available configuration parameters cache_timeout = 1 format = ( r"[\?if=num_lock&color=good NUM|\?color=bad NUM] " r"[\?if=caps_lock&color=good CAPS|\?color=bad CAPS] " r"[\?if=scroll_lock&color=good SCR|\?color=bad SCR]" ) def post_config_hook(self): items = [ "icon_num_on", "icon_num_off", "icon_caps_on", "icon_caps_off", "icon_scr_on", "icon_scr_off", ] if self.py3.format_contains(self.format, ["caps", "num", "scr"]) or ( any(getattr(self, v, None) is not None for v in items) ): raise Exception("please update the config for this module") # end deprecation self.locks = {} self.keyring = {"num_lock": "Num", "caps_lock": "Caps", "scroll_lock": "Scroll"} def keyboard_locks(self): xset_data = self.py3.command_output("xset q") for k, v in self.keyring.items(): self.locks[k] = "on" in xset_data.split("%s Lock:" % v)[1][0:6] return { "cached_until": self.py3.time_in(self.cache_timeout), "full_text": self.py3.safe_format(self.format, self.locks), } if __name__ == "__main__": """ Run module in test mode. """ from py3status.module_test import module_test module_test(Py3status)
bsd-3-clause
-3,356,702,791,891,390,500
26.635294
88
0.57301
false
MaxIV-KitsControls/netspot
netspot/ts_lib.py
1
5698
#!/usr/bin/python -tt """Junos Interface Troubleshooting library.""" import re import warnings import helpers from napalm import get_network_driver from jnpr.junos.exception import ConnectRefusedError, ConnectAuthError # JUNOS MAC table RE RE_VLAN = r'\s+([\w\d-]+)\s+' RE_MAC = r'\s?([*\w\d:]+)\s+' RE_TYPE = r'\s?([\w]+) ' RE_AGE = r'\s+([-\d:]+)' RE_INTERFACE = r'\s+([-.\w\d/]+)' RE_SWITCHING_TABLE = RE_VLAN + RE_MAC + RE_TYPE + RE_AGE + RE_INTERFACE class TroubleshootDevice(object): """Class to help troubleshoot device.""" def __init__(self, asset, loopback, ssh_key, interface_name): self.asset = asset self.loopback = loopback self.ssh_key = ssh_key self.interface_name = interface_name self.mac_address = None self.dhcp_logs = None self.dhcp_error = None self.log_entries = None self.interface = None self.error_message = None self.macs = {} self.lldp = {} def run(self): """Run troubleshooter.""" try: # Connect to asset driver = get_network_driver('junos') device = driver(self.loopback, 'automation', '', optional_args={'key_file': self.ssh_key}) device.open() with warnings.catch_warnings(record=True) as warning: warnings.filterwarnings('ignore') # Check interface cmd = 'show interfaces {0} detail'.format(self.interface_name) show_interface = device.cli([cmd]) self.interface = Interface(show_interface[cmd]) if self.interface.link_state == 'Up': # Get LLDP neighbor cmd = 'show lldp neighbors interface {0}'.format(self.interface_name) lldp_neighbor = device.cli([cmd]) self.lldp = LLDP(lldp_neighbor[cmd]) # Check MAC table cmd = 'show ethernet-switching table interface {0}'.format(self.interface_name) mac_table = device.cli([cmd]) self.macs = MACTable(mac_table[cmd]) # Search DHCP logs if MAC is specified if self.macs: self.mac_address = self.macs.mac_entries[0]['mac'] dhcp_server = helpers.get_dhcp_server(asset=self.asset) self.dhcp_logs, self.dhcp_error = helpers.search_dhcp_log(self.mac_address, dhcp_server) # Check log file cmd = 'show log messages' show_log = device.cli([cmd]) show_log = show_log[cmd] self.log_entries = re.findall(r'\n([\[\]\s\w\d:.-]+{0}[\s\w\d:.-]+)\n'.format(self.interface_name), show_log) device.close() except ConnectAuthError: self.error_message = 'Autentication failed to %s.' % self.loopback except ConnectRefusedError: self.error_message = 'Connection refused to %s.' % self.loopback except ValueError: self.error_message = 'No switch found.' 
class Interface(object): """Class to represent a JUNOS interface.""" def __init__(self, output): self.output = output self.link_state = '' self.speed = '' self.duplex = '' self.flapped = '' self.auto_neg = '' # Analyze output self.analyze_output() def analyze_output(self): """Anlyze the output from show interfaces X.""" # Link down match = re.search(r'Physical link is ([\w]+)', self.output) if match: self.link_state = match.groups()[0] # Speed match = re.search(r'Speed: ([\w\d]+),', self.output) if match: self.speed = match.groups()[0] # Duplex match = re.search(r'Duplex: ([\w-]+),', self.output) if match: self.duplex = match.groups()[0] # Last flapped match = re.search(r'Last flapped : ([\w\d ():-]+)\n', self.output) if match: self.flapped = match.groups()[0] # Auto negotiation match = re.search(r'Auto-negotiation: ([\w]+),', self.output) if match: self.auto_neg = match.groups()[0] class LLDP(object): """Parse and represent a LLDP neighbor.""" def __init__(self, output): self.output = output self.empty = True self.remote_chassis_id = '' self.remote_port_description = '' self.remote_system = '' # Analyze output self.analyze_output() if self.remote_chassis_id: self.empty = False def analyze_output(self): """Parse JUNOS show lldp neighboir interface X command.""" # Remote chassis ID match = re.search(r'Chassis ID\s+: ([\w\d:-]+)', self.output) if match: self.remote_chassis_id = match.groups()[0] # Remote port description match = re.search(r'Port description\s+: ([\w\d\/:-]+)', self.output) if match: self.remote_port_description = match.groups()[0] # Remote port system match = re.search(r'System name\s+: ([\w\d\/:-]+)', self.output) if match: self.remote_system = match.groups()[0] class MACTable(object): """Parse and save MAC entries from a JUNOS device.""" def __init__(self, output): self.output = output self.mac_entries = [] # Analyze output self.analyze_output() def analyze_output(self): """Parse JUNOS show ethernet-switching interface X command.""" # Remote chassis ID match = re.findall(RE_SWITCHING_TABLE, self.output) for entry in match: if entry[1] != '*': mac_entry = {'vlan': entry[0], 'mac': entry[1], 'type': entry[2], 'age': entry[3], 'interface': entry[4]} self.mac_entries.append(mac_entry) def __str__(self): if self.mac_entries: return self.mac_entries[0]['mac'] return None def main(): """Main.""" pass if __name__ == '__main__': main()
mit
-7,438,312,396,324,293,000
26.931373
107
0.586697
false
ndaniels/Ammolite
scripts/figure-generators/smsdIsoCompare.py
1
1534
import matplotlib.pyplot as plt from pylab import polyfit, poly1d, show, savefig import sys def isNumber( s): try: float(s) return True except ValueError: return False def makeGraph(X,Y, xName, yName, name="NoName"): fig = plt.figure() ax = fig.add_subplot(111) superName = "Comparison of {} and {}".format(xName,yName) outname = "{} from {}.png".format(superName,name) fig.suptitle(superName) ax.scatter(X,Y) fit = polyfit(X,Y,1) fit_fn = poly1d(fit) # fit_fn is now a function which takes in x and returns an estimate for y ax.plot(X,Y, 'yo', X, fit_fn(X), '--k') ax.set_xlabel('Size of MCS found by {}'.format(xName)) ax.set_ylabel('Size of MCS found by {}'.format(yName)) ax.text(1, 1, "y = {}*x + {}".format(fit[0], fit[1])) fig.savefig(outname) def buildIsoSMSDComparison( filename, outname="SMSD-IsoRank-comparison"): X, Y, xName, yName = [], [], "", "" with open( filename) as f: inComparison = False nameLine = False for line in f: if line.split()[0] == "COMPARISON_DELIMITER": if inComparison: makeGraph( X, Y, xName, yName, filename) inComparison = True nameLine = True X, Y = [], [] elif inComparison: l = line.split() if nameLine: xName, yName = l[0], l[1] nameLine = False else: X.append( float( l[0])) Y.append( float( l[1])) makeGraph( X, Y, xName, yName, filename) if __name__ == "__main__": args = sys.argv if(len(args) == 2): buildIsoSMSDComparison(args[1]) else: buildIsoSMSDComparison(args[1], args[2])
gpl-2.0
7,310,710,760,138,721,000
22.96875
95
0.634289
false
Frencil/box-python-sdk
test/unit/object/test_events.py
1
8316
# coding: utf-8 from __future__ import unicode_literals from itertools import chain import json from mock import Mock import pytest from requests.exceptions import Timeout from six.moves import map # pylint:disable=redefined-builtin from six.moves.urllib.parse import urlencode, urlunsplit # pylint:disable=import-error,no-name-in-module from boxsdk.network.default_network import DefaultNetworkResponse from boxsdk.object.events import Events, EventsStreamType, UserEventsStreamType from boxsdk.session.box_session import BoxResponse from boxsdk.util.ordered_dict import OrderedDict @pytest.fixture() def test_events(mock_box_session): return Events(mock_box_session) @pytest.fixture() def final_stream_position(): return 1348790499820 @pytest.fixture() def initial_stream_position(): return 1348790499819 # pylint:disable=no-member # pylint isn't currently smart enough to recognize the class member that was # added by the metaclass, when the metaclass was added by @add_metaclass() / # with_metaclass(). STREAM_TYPES_AS_ENUM_INSTANCES = list(EventsStreamType.__members__.values()) # pylint:enable=no-member STREAM_TYPES_AS_STRINGS = list(map(str, STREAM_TYPES_AS_ENUM_INSTANCES)) def test_events_stream_type_extended_enum_class_has_expected_members(): assert len(STREAM_TYPES_AS_ENUM_INSTANCES) >= 4 assert len(STREAM_TYPES_AS_STRINGS) >= 4 assert 'all' in STREAM_TYPES_AS_STRINGS assert 'changes' in STREAM_TYPES_AS_STRINGS assert 'sync' in STREAM_TYPES_AS_STRINGS assert 'admin_logs' in STREAM_TYPES_AS_STRINGS @pytest.fixture( scope='session', params=list(chain( [None], # Default behavior of not passing any stream_type STREAM_TYPES_AS_ENUM_INSTANCES, # Passing an enum instance STREAM_TYPES_AS_STRINGS, # Passing an enum value # For forwards compatibility, make sure that it works to pass a string # value that is not a member of the enum. ['future_stream_type'], )), ) def stream_type_param(request): """The value to pass as an Event method's stream_type parameter. :return: The parameter value, or `None` if no value should be passed. :rtype: :enum:`EventsStreamType` or `unicode` or `None` """ return request.param @pytest.fixture() def expected_stream_type(stream_type_param): """The stream type we expect to use. :rtype: `unicode` """ if stream_type_param is None: return UserEventsStreamType.ALL return stream_type_param @pytest.fixture() def stream_type_kwargs(stream_type_param): """The kwargs for stream_type to pass when invoking a method on `Events`. :rtype: `dict` """ if stream_type_param: return {'stream_type': stream_type_param} return {} @pytest.fixture() def expected_stream_type_params(expected_stream_type): """The stream_type-related params that we expect to pass to request methods. 
:rtype: :class:`OrderedDict` """ return OrderedDict(stream_type=expected_stream_type) @pytest.fixture() def empty_events_response(final_stream_position): # pylint:disable=redefined-outer-name mock_box_response = Mock(BoxResponse) mock_network_response = Mock(DefaultNetworkResponse) mock_box_response.network_response = mock_network_response mock_box_response.json.return_value = mock_json = {'next_stream_position': final_stream_position, 'entries': []} mock_box_response.content = json.dumps(mock_json).encode() mock_box_response.status_code = 200 mock_box_response.ok = True return mock_box_response @pytest.fixture() def long_poll_url(test_url, expected_stream_type_params): return urlunsplit(('', '', test_url, urlencode(expected_stream_type_params), '')) @pytest.fixture() def retry_timeout(): return 610 @pytest.fixture() def options_response_entry(long_poll_url, retry_timeout): return {'url': long_poll_url, 'retry_timeout': retry_timeout} @pytest.fixture() def options_response(options_response_entry, make_mock_box_request): # pylint:disable=redefined-outer-name mock_box_response, _ = make_mock_box_request( response={'entries': [options_response_entry]}, ) return mock_box_response @pytest.fixture() def new_change_long_poll_response(make_mock_box_request): mock_box_response, _ = make_mock_box_request( response={'message': 'new_change'}, ) return mock_box_response @pytest.fixture() def reconnect_long_poll_response(make_mock_box_request): mock_box_response, _ = make_mock_box_request( response={'message': 'reconnect'}, ) return mock_box_response @pytest.fixture() def max_retries_long_poll_response(make_mock_box_request): mock_box_response, _ = make_mock_box_request( response={'message': 'max_retries'}, ) return mock_box_response @pytest.fixture() def mock_event(): return { "type": "event", "event_id": "f82c3ba03e41f7e8a7608363cc6c0390183c3f83", "source": { "type": "folder", "id": "11446498", } } @pytest.fixture() def events_response(initial_stream_position, mock_event, make_mock_box_request): # pylint:disable=redefined-outer-name mock_box_response, _ = make_mock_box_request( response={"next_stream_position": initial_stream_position, "entries": [mock_event]}, ) return mock_box_response def test_get_events( test_events, mock_box_session, events_response, stream_type_kwargs, expected_stream_type_params, ): # pylint:disable=redefined-outer-name expected_url = test_events.get_url() mock_box_session.get.return_value = events_response events = test_events.get_events(**stream_type_kwargs) assert 'next_stream_position' in events mock_box_session.get.assert_any_call( expected_url, params=dict(limit=100, stream_position=0, **expected_stream_type_params), ) def test_get_long_poll_options( mock_box_session, test_events, stream_type_kwargs, expected_stream_type_params, options_response, options_response_entry, ): expected_url = test_events.get_url() mock_box_session.options.return_value = options_response long_poll_options = test_events.get_long_poll_options(**stream_type_kwargs) mock_box_session.options.assert_called_with(expected_url, params=expected_stream_type_params) assert long_poll_options == options_response_entry def test_generate_events_with_long_polling( test_events, mock_box_session, events_response, empty_events_response, initial_stream_position, long_poll_url, retry_timeout, options_response, new_change_long_poll_response, reconnect_long_poll_response, max_retries_long_poll_response, mock_event, stream_type_kwargs, expected_stream_type, expected_stream_type_params, ): # 
pylint:disable=redefined-outer-name expected_url = test_events.get_url() mock_box_session.options.return_value = options_response mock_box_session.get.side_effect = [ events_response, # initial call to get now stream position Timeout, reconnect_long_poll_response, max_retries_long_poll_response, new_change_long_poll_response, events_response, new_change_long_poll_response, empty_events_response, ] events = test_events.generate_events_with_long_polling(**stream_type_kwargs) assert next(events) == mock_event with pytest.raises(StopIteration): next(events) events.close() mock_box_session.options.assert_called_with(expected_url, params=expected_stream_type_params) mock_box_session.get.assert_any_call(expected_url, params={'stream_position': 'now', 'limit': 0, 'stream_type': expected_stream_type}) assert '/events' in expected_url mock_box_session.get.assert_any_call( expected_url, params=dict(limit=100, stream_position=initial_stream_position, **expected_stream_type_params), ) mock_box_session.get.assert_any_call( long_poll_url, timeout=retry_timeout, params={'stream_position': initial_stream_position}, )
apache-2.0
-5,183,375,154,726,981,000
29.686347
138
0.684343
false
alexisVallet/anime-bgrm
objectSegmentation.py
1
3095
import disjointSetForest as dsj import cv2 import numpy as np def toRowMajor(cols, i, j): return i * cols + j def fromRowMajor(cols, idx): return (idx / cols, idx % cols) class ObjectsSegmentation: """ Disjoint set forest, with the additional semantic element of an image to segment into background and objects (foreground). """ def __init__(self, image): rows, cols = image.shape[0:2] self.image = image self.segmentation = dsj.DisjointSetForest(rows * cols) self.background = None self.largest = None def find(self, i, j): """ Finds the root pixel of the segment containing pixel (i,j). """ rows, cols = self.image.shape[0:2] return fromRowMajor(cols, self.segmentation.find(toRowMajor(cols, i, j))) def unsafeUnion(self, i, j, k, l): """ Fuses the segments containing pixels (i,j) and (k,l) into a single segment. Doesn't check if either segment is the background. """ rows, cols = self.image.shape[0:2] newRoot = self.segmentation.union(toRowMajor(cols,i,j), toRowMajor(cols,k,l)) return fromRowMajor(cols, newRoot) def union(self, i, j, k, l): """ Fuses the segments containing pixels (i,j) and (k,l) into a single segment. Neither segments should be the background. """ rows, cols = self.image.shape[0:2] fstRoot = self.find(i,j) sndRoot = self.find(k,l) if fstRoot == self.background or sndRoot == self.background: raise ValueError("Cannot perform union of background pixels!") else: newRoot = self.segmentation.union(toRowMajor(cols,i,j), toRowMajor(cols,k,l)) newRootPixel = fromRowMajor(cols,newRoot) # keep track of the largest object if self.largest == None: self.largest = newRootPixel else: (li, lj) = self.largest largestSize = self.segmentation.compSize[toRowMajor(cols,li,lj)] if self.segmentation.compSize[newRoot] > largestSize: self.largest = newRootPixel def setBackground(self, i, j): """ Marks the (i,j) pixel as a background pixel. """ if self.background == None: self.background = (i,j) else: (k,l) = self.background self.background = self.unsafeUnion(k, l, i, j) def getLargestObject(self): return (0,0) if self.largest == None else self.largest def foregroundMask(self, fgVal=0, bgVal=255): rows, cols = self.image.shape[0:2] mask = np.empty([rows, cols], dtype=np.uint8) for i in range(0,rows): for j in range(0,cols): root = self.find(i,j) if root == self.background: mask[i,j] = bgVal else: mask[i,j] = fgVal return mask
gpl-2.0
-6,634,703,475,703,437,000
35.845238
88
0.555412
false
hguemar/cinder
cinder/utils.py
1
26017
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import datetime import hashlib import inspect import os import pyclbr import re import shutil import stat import sys import tempfile from xml.dom import minidom from xml.parsers import expat from xml import sax from xml.sax import expatreader from xml.sax import saxutils from oslo.utils import importutils from oslo.utils import timeutils from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg import six from cinder.brick.initiator import connector from cinder import exception from cinder.i18n import _, _LE from cinder.openstack.common import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" synchronized = lockutils.synchronized_with_prefix('cinder-') def find_config(config_path): """Find a configuration file using the given hint. :param config_path: Full or relative path to the config. :returns: Full path of the config, if it exists. :raises: `cinder.exception.ConfigNotFound` """ possible_locations = [ config_path, os.path.join(CONF.state_path, "etc", "cinder", config_path), os.path.join(CONF.state_path, "etc", config_path), os.path.join(CONF.state_path, config_path), "/etc/cinder/%s" % config_path, ] for path in possible_locations: if os.path.exists(path): return os.path.abspath(path) raise exception.ConfigNotFound(path=os.path.abspath(config_path)) def as_int(obj, quiet=True): # Try "2" -> 2 try: return int(obj) except (ValueError, TypeError): pass # Try "2.5" -> 2 try: return int(float(obj)) except (ValueError, TypeError): pass # Eck, not sure what this is then. if not quiet: raise TypeError(_("Can not translate %s to integer.") % (obj)) return obj def is_int_like(val): """Check if a value looks like an int.""" try: return str(int(val)) == str(val) except Exception: return False def check_exclusive_options(**kwargs): """Checks that only one of the provided options is actually not-none. Iterates over all the kwargs passed in and checks that only one of said arguments is not-none, if more than one is not-none then an exception will be raised with the names of those arguments who were not-none. """ if not kwargs: return pretty_keys = kwargs.pop("pretty_keys", True) exclusive_options = {} for (k, v) in kwargs.iteritems(): if v is not None: exclusive_options[k] = True if len(exclusive_options) > 1: # Change the format of the names from pythonic to # something that is more readable. 
# # Ex: 'the_key' -> 'the key' if pretty_keys: names = [k.replace('_', ' ') for k in kwargs.keys()] else: names = kwargs.keys() names = ", ".join(sorted(names)) msg = (_("May specify only one of %s") % (names)) raise exception.InvalidInput(reason=msg) def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() method.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = get_root_helper() return processutils.execute(*cmd, **kwargs) def check_ssh_injection(cmd_list): ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>', '<'] # Check whether injection attacks exist for arg in cmd_list: arg = arg.strip() # Check for matching quotes on the ends is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg) if is_quoted: # Check for unescaped quotes within the quoted argument quoted = is_quoted.group('quoted') if quoted: if (re.match('[\'"]', quoted) or re.search('[^\\\\][\'"]', quoted)): raise exception.SSHInjectionThreat(command=cmd_list) else: # We only allow spaces within quoted arguments, and that # is the only special character allowed within quotes if len(arg.split()) > 1: raise exception.SSHInjectionThreat(command=cmd_list) # Second, check whether danger character in command. So the shell # special operator must be a single argument. for c in ssh_injection_pattern: if c not in arg: continue result = arg.find(c) if not result == -1: if result == 0 or not arg[result - 1] == '\\': raise exception.SSHInjectionThreat(command=cmd_list) def create_channel(client, width, height): """Invoke an interactive shell session on server.""" channel = client.invoke_shell() channel.resize_pty(width, height) return channel def cinderdir(): import cinder return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0] def last_completed_audit_period(unit=None): """This method gives you the most recently *completed* audit period. arguments: units: string, one of 'hour', 'day', 'month', 'year' Periods normally begin at the beginning (UTC) of the period unit (So a 'day' period begins at midnight UTC, a 'month' unit on the 1st, a 'year' on Jan, 1) unit string may be appended with an optional offset like so: 'day@18' This will begin the period at 18:00 UTC. 'month@15' starts a monthly period on the 15th, and year@3 begins a yearly one on March 1st. returns: 2 tuple of datetimes (begin, end) The begin timestamp of this audit period is the same as the end of the previous. 
""" if not unit: unit = CONF.volume_usage_audit_period offset = 0 if '@' in unit: unit, offset = unit.split("@", 1) offset = int(offset) rightnow = timeutils.utcnow() if unit not in ('month', 'day', 'year', 'hour'): raise ValueError('Time period must be hour, day, month or year') if unit == 'month': if offset == 0: offset = 1 end = datetime.datetime(day=offset, month=rightnow.month, year=rightnow.year) if end >= rightnow: year = rightnow.year if 1 >= rightnow.month: year -= 1 month = 12 + (rightnow.month - 1) else: month = rightnow.month - 1 end = datetime.datetime(day=offset, month=month, year=year) year = end.year if 1 >= end.month: year -= 1 month = 12 + (end.month - 1) else: month = end.month - 1 begin = datetime.datetime(day=offset, month=month, year=year) elif unit == 'year': if offset == 0: offset = 1 end = datetime.datetime(day=1, month=offset, year=rightnow.year) if end >= rightnow: end = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 2) else: begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) elif unit == 'day': end = datetime.datetime(hour=offset, day=rightnow.day, month=rightnow.month, year=rightnow.year) if end >= rightnow: end = end - datetime.timedelta(days=1) begin = end - datetime.timedelta(days=1) elif unit == 'hour': end = rightnow.replace(minute=offset, second=0, microsecond=0) if end >= rightnow: end = end - datetime.timedelta(hours=1) begin = end - datetime.timedelta(hours=1) return (begin, end) class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" def __init__(self, pivot, **backends): self.__backends = backends self.__pivot = pivot self.__backend = None def __get_backend(self): if not self.__backend: backend_name = CONF[self.__pivot] if backend_name not in self.__backends: raise exception.Error(_('Invalid backend: %s') % backend_name) backend = self.__backends[backend_name] if isinstance(backend, tuple): name = backend[0] fromlist = backend[1] else: name = backend fromlist = backend self.__backend = __import__(name, None, None, fromlist) LOG.debug('backend %s', self.__backend) return self.__backend def __getattr__(self, key): backend = self.__get_backend() return getattr(backend, key) class ProtectedExpatParser(expatreader.ExpatParser): """An expat parser which disables DTD's and entities by default.""" def __init__(self, forbid_dtd=True, forbid_entities=True, *args, **kwargs): # Python 2.x old style class expatreader.ExpatParser.__init__(self, *args, **kwargs) self.forbid_dtd = forbid_dtd self.forbid_entities = forbid_entities def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): raise ValueError("Inline DTD forbidden") def entity_decl(self, entityName, is_parameter_entity, value, base, systemId, publicId, notationName): raise ValueError("<!ENTITY> forbidden") def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): # expat 1.2 raise ValueError("<!ENTITY> forbidden") def reset(self): expatreader.ExpatParser.reset(self) if self.forbid_dtd: self._parser.StartDoctypeDeclHandler = self.start_doctype_decl if self.forbid_entities: self._parser.EntityDeclHandler = self.entity_decl self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl def safe_minidom_parse_string(xml_string): """Parse an XML string using minidom safely. 
""" try: return minidom.parseString(xml_string, parser=ProtectedExpatParser()) except sax.SAXParseException: raise expat.ExpatError() def xhtml_escape(value): """Escapes a string so it is valid within XML or XHTML. """ return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'}) def get_from_path(items, path): """Returns a list of items matching the specified path. Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the intermediate results are lists it will treat each list item individually. A 'None' in items or any child expressions will be ignored, this function will not throw because of None (anywhere) in items. The returned list will contain no None values. """ if path is None: raise exception.Error('Invalid mini_xpath') (first_token, sep, remainder) = path.partition('/') if first_token == '': raise exception.Error('Invalid mini_xpath') results = [] if items is None: return results if not isinstance(items, list): # Wrap single objects in a list items = [items] for item in items: if item is None: continue get_method = getattr(item, 'get', None) if get_method is None: continue child = get_method(first_token) if child is None: continue if isinstance(child, list): # Flatten intermediate lists for x in child: results.append(x) else: results.append(child) if not sep: # No more tokens return results else: return get_from_path(results, remainder) def is_valid_boolstr(val): """Check if the provided string is a valid bool string or not.""" val = str(val).lower() return (val == 'true' or val == 'false' or val == 'yes' or val == 'no' or val == 'y' or val == 'n' or val == '1' or val == '0') def is_none_string(val): """Check if a string represents a None value.""" if not isinstance(val, six.string_types): return False return val.lower() == 'none' def monkey_patch(): """If the CONF.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'cinder.api.ec2.cloud:' \ cinder.openstack.common.notifier.api.notify_decorator' Parameters of the decorator is as follows. (See cinder.openstack.common.notifier.api.notify_decorator) name - name of the function function - object of the function """ # If CONF.monkey_patch is not True, this function do nothing. if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) for method, func in inspect.getmembers(clz, inspect.ismethod): setattr( clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def generate_glance_url(): """Generate the URL to glance.""" # TODO(jk0): This will eventually need to take SSL into consideration # when supported in glance. 
return "http://%s:%d" % (CONF.glance_host, CONF.glance_port) def make_dev_path(dev, partition=None, base='/dev'): """Return a path to a particular device. >>> make_dev_path('xvdc') /dev/xvdc >>> make_dev_path('xvdc', 1) /dev/xvdc1 """ path = os.path.join(base, dev) if partition: path += str(partition) return path def sanitize_hostname(hostname): """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" if isinstance(hostname, unicode): hostname = hostname.encode('latin-1', 'ignore') hostname = re.sub('[ _]', '-', hostname) hostname = re.sub('[^\w.-]+', '', hostname) hostname = hostname.lower() hostname = hostname.strip('.-') return hostname def hash_file(file_like_object): """Generate a hash for the contents of a file.""" checksum = hashlib.sha1() any(map(checksum.update, iter(lambda: file_like_object.read(32768), ''))) return checksum.hexdigest() def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. elapsed = (timeutils.utcnow() - last_heartbeat).total_seconds() return abs(elapsed) <= CONF.service_down_time def read_file_as_root(file_path): """Secure helper to read file as root.""" try: out, _err = execute('cat', file_path, run_as_root=True) return out except processutils.ProcessExecutionError: raise exception.FileNotFound(file_path=file_path) @contextlib.contextmanager def temporary_chown(path, owner_uid=None): """Temporarily chown a path. :params owner_uid: UID of temporary owner (defaults to current user) """ if owner_uid is None: owner_uid = os.getuid() orig_uid = os.stat(path).st_uid if orig_uid != owner_uid: execute('chown', owner_uid, path, run_as_root=True) try: yield finally: if orig_uid != owner_uid: execute('chown', orig_uid, path, run_as_root=True) @contextlib.contextmanager def tempdir(**kwargs): tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.debug('Could not remove tmpdir: %s', e) def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass def get_root_helper(): return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config def brick_get_connector_properties(): """wrapper for the brick calls to automatically set the root_helper needed for cinder. """ root_helper = get_root_helper() return connector.get_connector_properties(root_helper, CONF.my_ip) def brick_get_connector(protocol, driver=None, execute=processutils.execute, use_multipath=False, device_scan_attempts=3, *args, **kwargs): """Wrapper to get a brick connector object. This automatically populates the required protocol as well as the root_helper needed to execute commands. """ root_helper = get_root_helper() return connector.InitiatorConnector.factory(protocol, root_helper, driver=driver, execute=execute, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, *args, **kwargs) def require_driver_initialized(driver): """Verifies if `driver` is initialized If the driver is not initialized, an exception will be raised. :params driver: The driver instance. 
:raises: `exception.DriverNotInitialized` """ # we can't do anything if the driver didn't init if not driver.initialized: driver_name = driver.__class__.__name__ LOG.error(_LE("Volume driver %s not initialized") % driver_name) raise exception.DriverNotInitialized() def get_file_mode(path): """This primarily exists to make unit testing easier.""" return stat.S_IMODE(os.stat(path).st_mode) def get_file_gid(path): """This primarily exists to make unit testing easier.""" return os.stat(path).st_gid def _get_disk_of_partition(devpath, st=None): """Returns a disk device path from a partition device path, and stat for the device. If devpath is not a partition, devpath is returned as it is. For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is for '/dev/disk1p1' ('p' is prepended to the partition number if the disk name ends with numbers). """ diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath) if diskpath != devpath: try: st_disk = os.stat(diskpath) if stat.S_ISBLK(st_disk.st_mode): return (diskpath, st_disk) except OSError: pass # devpath is not a partition if st is None: st = os.stat(devpath) return (devpath, st) def get_blkdev_major_minor(path, lookup_for_file=True): """Get the device's "major:minor" number of a block device to control I/O ratelimit of the specified path. If lookup_for_file is True and the path is a regular file, lookup a disk device which the file lies on and returns the result for the device. """ st = os.stat(path) if stat.S_ISBLK(st.st_mode): path, st = _get_disk_of_partition(path, st) return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev)) elif stat.S_ISCHR(st.st_mode): # No I/O ratelimit control is provided for character devices return None elif lookup_for_file: # lookup the mounted disk which the file lies on out, _err = execute('df', path) devpath = out.split("\n")[1].split()[0] if devpath[0] is not '/': # the file is on a network file system return None return get_blkdev_major_minor(devpath, False) else: msg = _("Unable to get a block device for file \'%s\'") % path raise exception.Error(msg) def check_string_length(value, name, min_length=0, max_length=None): """Check the length of specified string :param value: the value of the string :param name: the name of the string :param min_length: the min_length of the string :param max_length: the max_length of the string """ if not isinstance(value, six.string_types): msg = _("%s is not a string or unicode") % name raise exception.InvalidInput(message=msg) if len(value) < min_length: msg = _("%(name)s has a minimum character requirement of " "%(min_length)s.") % {'name': name, 'min_length': min_length} raise exception.InvalidInput(message=msg) if max_length and len(value) > max_length: msg = _("%(name)s has more than %(max_length)s " "characters.") % {'name': name, 'max_length': max_length} raise exception.InvalidInput(message=msg) _visible_admin_metadata_keys = ['readonly', 'attached_mode'] def add_visible_admin_metadata(volume): """Add user-visible admin metadata to regular metadata. Extracts the admin metadata keys that are to be made visible to non-administrators, and adds them to the regular metadata structure for the passed-in volume. 
""" visible_admin_meta = {} if volume.get('volume_admin_metadata'): for item in volume['volume_admin_metadata']: if item['key'] in _visible_admin_metadata_keys: visible_admin_meta[item['key']] = item['value'] # avoid circular ref when volume is a Volume instance elif (volume.get('admin_metadata') and isinstance(volume.get('admin_metadata'), dict)): for key in _visible_admin_metadata_keys: if key in volume['admin_metadata'].keys(): visible_admin_meta[key] = volume['admin_metadata'][key] if not visible_admin_meta: return # NOTE(zhiyan): update visible administration metadata to # volume metadata, administration metadata will rewrite existing key. if volume.get('volume_metadata'): orig_meta = list(volume.get('volume_metadata')) for item in orig_meta: if item['key'] in visible_admin_meta.keys(): item['value'] = visible_admin_meta.pop(item['key']) for key, value in visible_admin_meta.iteritems(): orig_meta.append({'key': key, 'value': value}) volume['volume_metadata'] = orig_meta # avoid circular ref when vol is a Volume instance elif (volume.get('metadata') and isinstance(volume.get('metadata'), dict)): volume['metadata'].update(visible_admin_meta) else: volume['metadata'] = visible_admin_meta def remove_invalid_filter_options(context, filters, allowed_search_options): """Remove search options that are not valid for non-admin API/context. """ if context.is_admin: # Allow all options return # Otherwise, strip out all unknown options unknown_options = [opt for opt in filters if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) log_msg = "Removing options '%s' from query." % bad_options LOG.debug(log_msg) for opt in unknown_options: del filters[opt] def is_blk_device(dev): try: if stat.S_ISBLK(os.stat(dev).st_mode): return True return False except Exception: LOG.debug('Path %s not found in is_blk_device check' % dev) return False
apache-2.0
1,572,447,231,465,571,000
32.876302
79
0.597379
false
crypto101/arthur
arthur/test/test_util.py
1
4581
from arthur.util import MultiDeferred from twisted.internet import defer from twisted.trial import unittest class MultiDeferredTests(unittest.SynchronousTestCase): """ Tests for L{defer.MultiDeferred}, except now in Arthur. See tm.tl/6365. """ def setUp(self): self.multiDeferred = MultiDeferred() def test_callback(self): """ Any produced L{defer.Deferred}s have their callbacks called when the L{defer.MultiDeferred} does. """ a, b, c = [self.multiDeferred.tee() for _ in xrange(3)] self.assertNoResult(a) self.assertNoResult(b) self.assertNoResult(c) result = object() self.multiDeferred.callback(result) self.assertIdentical(self.successResultOf(a), result) self.assertIdentical(self.successResultOf(b), result) self.assertIdentical(self.successResultOf(c), result) def test_errback(self): """ Any produced L{defer.Deferred}s have their errbacks called when the L{defer.MultiDeferred} does. """ a, b, c = [self.multiDeferred.tee() for _ in xrange(3)] self.assertNoResult(a) self.assertNoResult(b) self.assertNoResult(c) error = RuntimeError() self.multiDeferred.errback(error) self.assertIdentical(self.failureResultOf(a, RuntimeError).value, error) self.assertIdentical(self.failureResultOf(b, RuntimeError).value, error) self.assertIdentical(self.failureResultOf(c, RuntimeError).value, error) def test_callbackAfterCallback(self): """ Calling C{callback} twice raises L{defer.AlreadyCalledError}. """ self.multiDeferred.callback(None) self.assertRaises(defer.AlreadyCalledError, self.multiDeferred.callback, None) def test_callbackAfterErrback(self): """ Calling C{callback} after C{errback} raises L{defer.AlreadyCalledError}. """ self.multiDeferred.errback(RuntimeError()) self.assertRaises(defer.AlreadyCalledError, self.multiDeferred.callback, None) def test_errbackAfterCallback(self): """ Calling C{errback} after C{callback} raises L{defer.AlreadyCalledError}. """ self.multiDeferred.callback(None) self.assertRaises(defer.AlreadyCalledError, self.multiDeferred.errback, RuntimeError()) def test_errbackAfterErrback(self): """ Calling C{errback} after C{errback} raises L{defer.AlreadyCalledError}. """ self.multiDeferred.errback(RuntimeError()) self.assertRaises(defer.AlreadyCalledError, self.multiDeferred.errback, RuntimeError()) def test_synchronousCallbacks(self): """ All callbacks are called sequentially, synchronously, and in the order they were produced. If one or more of the L{defer.Deferred}s produced by L{defer.MultiDeferred.tee} is waiting on a deferred that will never fire, all the other deferreds produced by that method are still fired. """ called = [] result = object() def callback(r, i): """ Checks this is the correct result, adds this deferreds index to the list of called deferreds, and then returns a deferred that will never fire. """ self.assertIdentical(r, result) called.append(i) return defer.Deferred() for i in range(10): self.multiDeferred.tee().addCallback(callback, i=i) self.assertEqual(called, []) self.multiDeferred.callback(result) self.assertEqual(called, range(10)) def test_alreadyFiredWithResult(self): """ If the C{MultiDeferred} already fired, C{tee} produces a C{Deferred} that has already been fired. """ result = object() self.multiDeferred.callback(result) d = self.multiDeferred.tee() self.assertIdentical(self.successResultOf(d), result) def test_alreadyFiredWithError(self): """ If the C{MultiDeferred} already fired with a failure, C{tee} produces a C{Deferred} that has already been fired with the failure. 
""" error = RuntimeError() self.multiDeferred.errback(error) d = self.multiDeferred.tee() failure = self.failureResultOf(d, RuntimeError) self.assertIdentical(failure.value, error)
isc
-7,686,488,387,015,034,000
31.956835
84
0.629993
false
OffenesJena/JenLoRa
LoPy/LoAirRohr01/lib/DHT22RinusW.py
1
2355
from machine import enable_irq, disable_irq import time # this onewire protocol code tested with Pycom LoPy device and AM2302/DHT22 sensor def getval(pin): ms = [1]*700 # needs long sample size to grab all the bits from the DHT time.sleep(1) pin(0) time.sleep_us(10000) pin(1) irqf = disable_irq() for i in range(len(ms)): ms[i] = pin() ## sample input and store value enable_irq(irqf) #for i in range(len(ms)): #print debug for checking raw data # print (ms[i]) return ms def decode(inp): res= [0]*5 bits=[] ix = 0 try: #if inp[0] == 1 : ix = inp.index(0, ix) ## skip to first 0 # ignore first '1' as probably sample of start signal. *But* code seems to be missing the start signal, so jump this line to ensure response signal is identified in next two lines. ix = inp.index(1,ix) ## skip first 0's to next 1 # ignore first '10' bits as probably the response signal. ix = inp.index(0,ix) ## skip first 1's to next 0 while len(bits) < len(res)*8 : ##need 5 * 8 bits : ix = inp.index(1,ix) ## index of next 1 ie = inp.index(0,ix) ## nr of 1's = ie-ix # print ('ie-ix:',ie-ix) bits.append(ie-ix) ix = ie except: print('6: decode error') # print('length:') # print(len(inp), len(bits)) return([0xff,0xff,0xff,0xff]) # print('bits:', bits) for i in range(len(res)): for v in bits[i*8:(i+1)*8]: #process next 8 bit res[i] = res[i]<<1 ##shift byte one place to left if v > 7: # less than 7 '1's is a zero, more than 7 1's in the sequence is a one res[i] = res[i]+1 ##and add 1 if lsb is 1 # print ('res', i, res[i]) if (res[0]+res[1]+res[2]+res[3])&0xff != res[4] : ##parity error! print("Checksum Error") print (res[0:4]) res= [0xff,0xff,0xff,0xff] # print ('res:', res[0:4]) return(res[0:4]) def DHT11(pin): res = decode(getval(pin)) temp = 10 * res[0] + res[1] hum = 10 * res[2] + res[3] return temp, hum def DHT22(pin): res = decode(getval(pin)) hum = res[0] * 256 + res[1] temp = res[2] * 256 + res[3] if (temp > 0x7fff): temp = 0x8000 - temp return temp, hum
apache-2.0
6,041,025,592,553,690,000
33.632353
248
0.546072
false
Ilias95/lib389
lib389/tests/dseldif_test.py
1
4107
# --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2017 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # import logging import pytest from lib389._constants import * from lib389.dseldif import DSEldif from lib389.topologies import topology_st as topo DEBUGGING = os.getenv('DEBUGGING', False) if DEBUGGING: logging.getLogger(__name__).setLevel(logging.DEBUG) else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @pytest.mark.parametrize("entry_dn", (DN_CONFIG, DN_CONFIG_LDBM)) def test_get_singlevalue(topo, entry_dn): """Check that we can get an attribute value under different suffixes""" dse_ldif = DSEldif(topo.standalone) log.info("Get 'cn' attr from {}".format(entry_dn)) attr_values = dse_ldif.get(entry_dn, "cn") assert attr_values == ["config"] log.info("Get 'nonexistent' attr from {}".format(entry_dn)) attr_values = dse_ldif.get(entry_dn, "nonexistent") assert not attr_values def test_get_multivalue(topo): """Check that we can get attribute values""" dse_ldif = DSEldif(topo.standalone) log.info("Get objectClass from {}".format(DN_CONFIG)) attr_values = dse_ldif.get(DN_CONFIG, "objectClass") assert len(attr_values) == 3 assert "top" in attr_values assert "extensibleObject" in attr_values assert "nsslapdConfig" in attr_values @pytest.mark.parametrize("fake_attr_value", ("fake value", "fakevalue")) def test_add(topo, fake_attr_value): """Check that we can add an attribute to a given suffix""" dse_ldif = DSEldif(topo.standalone) fake_attr = "fakeAttr" log.info("Add {} to {}".format(fake_attr, DN_CONFIG)) dse_ldif.add(DN_CONFIG, fake_attr, fake_attr_value) attr_values = dse_ldif.get(DN_CONFIG, fake_attr) assert attr_values == [fake_attr_value] log.info("Clean up") dse_ldif.delete(DN_CONFIG, fake_attr) assert not dse_ldif.get(DN_CONFIG, fake_attr) def test_replace(topo): """Check that we can replace an attribute to a given suffix""" dse_ldif = DSEldif(topo.standalone) port_attr = "nsslapd-port" port_value = "390" log.info("Get default value of {}".format(port_attr)) default_value = dse_ldif.get(DN_CONFIG, port_attr)[0] log.info("Replace {} with {}".format(port_attr, port_value)) dse_ldif.replace(DN_CONFIG, port_attr, port_value) attr_values = dse_ldif.get(DN_CONFIG, port_attr) assert attr_values == [port_value] log.info("Restore default value") dse_ldif.replace(DN_CONFIG, port_attr, default_value) def test_delete_singlevalue(topo): """Check that we can delete an attribute from a given suffix""" dse_ldif = DSEldif(topo.standalone) fake_attr = "fakeAttr" fake_attr_values = ["fake1", "fake2", "fake3"] log.info("Add multivalued {} to {}".format(fake_attr, DN_CONFIG)) for value in fake_attr_values: dse_ldif.add(DN_CONFIG, fake_attr, value) log.info("Delete {}".format(fake_attr_values[0])) dse_ldif.delete(DN_CONFIG, fake_attr, fake_attr_values[0]) attr_values = dse_ldif.get(DN_CONFIG, fake_attr) assert len(attr_values) == 2 assert fake_attr_values[0] not in attr_values assert fake_attr_values[1] in attr_values assert fake_attr_values[2] in attr_values log.info("Clean up") dse_ldif.delete(DN_CONFIG, fake_attr) assert not dse_ldif.get(DN_CONFIG, fake_attr) def test_delete_multivalue(topo): """Check that we can delete attributes from a given suffix""" dse_ldif = DSEldif(topo.standalone) fake_attr = "fakeAttr" fake_attr_values = ["fake1", "fake2", "fake3"] log.info("Add multivalued {} to {}".format(fake_attr, DN_CONFIG)) for value in fake_attr_values: 
dse_ldif.add(DN_CONFIG, fake_attr, value) log.info("Delete all values of {}".format(fake_attr)) dse_ldif.delete(DN_CONFIG, fake_attr) assert not dse_ldif.get(DN_CONFIG, fake_attr)
gpl-3.0
5,447,008,069,091,832,000
30.592308
75
0.661066
false
darbaga/simple_compiler
virtual_machine.py
1
2139
class VirtualMachine:
    def __init__(self, ram_size=512, executing=True):
        self.data = {i: None for i in range(ram_size)}
        self.stack = []
        self.executing = executing
        self.pc = 0
        self.devices_start = 256

    def push(self, value):
        """Push something onto the stack."""
        self.stack += [value]

    def pop(self):
        """Pop something from the stack. Crash if empty."""
        return self.stack.pop()

    def read_memory(self, index):
        """Read from memory, crashing if index is out of bounds."""
        if isinstance(self.data[index], DeviceProxy):
            return self.data[index].read(index)
        else:
            return self.data[index]

    def write_memory(self, index, value):
        """Write to memory. Crash if index is out of bounds."""
        if isinstance(self.data[index], DeviceProxy):
            self.data[index].write(index, value)
        else:
            self.data[index] = value

    def register_device(self, device, needed_addresses):
        """Given an instantiated device and the number of required addresses,
        registers it in memory"""
        # If not enough addresses, just error out
        if self.devices_start + needed_addresses > len(self.data):
            raise Exception('Not enough addresses to allocate')
        proxyed_device = DeviceProxy(device, self.devices_start)
        for i in range(self.devices_start, self.devices_start + needed_addresses):
            self.data[i] = proxyed_device
        self.devices_start += needed_addresses

    def run(self, bytecodes):
        self.bytecodes = bytecodes
        while self.executing:
            increment = self.bytecodes[self.pc].autoincrement
            self.bytecodes[self.pc].execute(self)
            if increment:
                self.pc += 1


class DeviceProxy:
    """Translates absolute VM addresses into device-local offsets."""
    def __init__(self, device, pos):
        self.device = device
        self.pos = pos  # first VM address mapped to this device

    def read(self, index):
        # the device-local offset is the distance from the device's base address
        return self.device.read(index - self.pos)

    def write(self, index, value):
        self.device.write(index - self.pos, value)
bsd-3-clause
2,304,611,489,600,544,300
34.65
103
0.610566
false
gladgod/zhiliao
zhiliao/twitter/defaults.py
1
2296
""" Default settings for the ``mezzanine.twitter`` app. Each of these can be overridden in your project's settings module, just like regular Django settings. The ``editable`` argument for each controls whether the setting is editable via Django's admin. Thought should be given to how a setting is actually used before making it editable, as it may be inappropriate - for example settings that are only read during startup shouldn't be editable, since changing them would require an application reload. """ from __future__ import unicode_literals from django.utils.translation import ugettext_lazy as _ from zhiliao.conf import register_setting from zhiliao.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_SEARCH register_setting( name="TWITTER_DEFAULT_QUERY_TYPE", label=_("Default Twitter Query Type"), description=_("Type of query that will be used to retrieve tweets for " "the default Twitter feed."), editable=True, default=QUERY_TYPE_SEARCH, choices=QUERY_TYPE_CHOICES, ) register_setting( name="TWITTER_DEFAULT_QUERY", label=_("Default Twitter Query"), description=_("Twitter query to use for the default query type. " "\n\n*Note:* Once you change this from the default, you'll need to " "configure each of the oAuth consumer/access key/secret settings. " "Please refer to http://dev.twitter.com for more information " "on creating an application and acquiring these settings."), editable=True, default="from:stephen_mcd mezzanine", ) register_setting( name="TWITTER_DEFAULT_NUM_TWEETS", label=_("Default Number of Tweets"), description=_("Number of tweets to display in the default Twitter feed."), editable=True, default=3, ) register_setting( name="TWITTER_CONSUMER_KEY", label=_("Twitter OAuth consumer key"), editable=True, default='', ) register_setting( name="TWITTER_CONSUMER_SECRET", label=_("Twitter OAuth consumer secret"), editable=True, default='', ) register_setting( name="TWITTER_ACCESS_TOKEN_KEY", label=_("Twitter OAuth access token"), editable=True, default='', ) register_setting( name="TWITTER_ACCESS_TOKEN_SECRET", label=_("Twitter OAuth access token secret"), editable=True, default='', )
bsd-3-clause
6,396,643,095,361,620,000
29.613333
78
0.708624
false
nmmmnu/MessageQueue
protocols/memcachedhandler.py
1
4321
# # Memcached protocol implementation # Nikolay Mihaylov [email protected] # # For Memcached telnet protocol see: # http://blog.elijaa.org/?post/2010/05/21/Memcached-telnet-command-summary import asynchat import time try: from cStringIO import StringIO except ImportError: from StringIO import StringIO class MemcachedHandler(asynchat.async_chat): commands_with_data = ['set', 'add', "sismember"] def __init__(self, sock, addr, processor): # # Constructs new Memcached protocol handler # # @param sock : socket from asyncore # @param addr : address from asyncore # @param processor : processor class # asynchat.async_chat.__init__(self, sock=sock) self.addr = addr self.started = time.time() self.lastping = time.time() self.head = "" self.data = "" self.processor = processor self.state_change("read_header") def state_change(self, state, size = 0): self.io = StringIO() if state == "read_header": self.state = state self.set_terminator("\r\n") return True if state == "read_data": # size == 0 is an error, but we will ignore it. if size < 0: return False self.state = state self.set_terminator(size + len("\r\n") ) return True # Unknown state ? return False def cmd_parse_head(self): m2 = self.head.split(" ") # clean up empty arguments. m = [] for x in m2: x = x.strip() if x != "": m.append(x) # for easy access, put some blanks at the end. while len(m) < 10: m.append("") return m def cmd_parse(self): self.lastping = time.time() args = self.cmd_parse_head() command = args[0].lower() if command == "get": key = args[1] x = self.processor.get(key) if x is None: self.push("END\r\n") return msg = "VALUE %s 0 %d\r\n%s\r\nEND\r\n" % (key, len(x), x) self.push(msg) return if command == "delete": key = args[1] x = self.processor.delete(key) if x: self.push("DELETED\r\n") return self.push("NOT_FOUND\r\n") return if command == "set": # It is protocol responsibility to check the size. try: size = int(args[4]) if len(self.data) > size: self.data = self.data[:size] except: pass key = args[1] x = self.processor.set(key, self.data) if x: self.push("STORED\r\n") return self.push("NOT_STORED\r\n") return if command == "add": # It is protocol responsibility to check the size. try: size = int(args[4]) if len(self.data) > size: self.data = self.data[:size] except: pass key = args[1] x = self.processor.add(key, self.data) if x: self.push("STORED\r\n") return self.push("NOT_STORED\r\n") return # Non standard command if command == "scard": key = args[1] x = self.processor.len(key) if x is None: x = "0" msg = "VALUE %s 0 %d\r\n%s\r\nEND\r\n" % (key, len(x), x) self.push(msg) return # Non standard command if command == "sismember": # It is protocol responsibility to check the size. 
try: size = int(args[4]) if len(self.data) > size: self.data = self.data[:size] except: pass key = args[1] x = self.processor.contains(key, self.data) if x: self.push("MEMBER\r\n") return self.push("NOT_MEMBER\r\n") return if command == "quit": self.push("QUIT\r\n") self.close() return # error, not implemented self.push("ERROR\r\n") return def state_read_header(self): self.head = self.io.getvalue() m = self.cmd_parse_head() if m[0] in self.commands_with_data: try: size = int(m[4]) except: size = 0 self.state_change("read_data", size) return self.state_change("read_header") self.cmd_parse() def state_read_data(self): self.data = self.io.getvalue() self.state_change("read_header") self.cmd_parse() def found_terminator(self): if self.state == "read_header": return self.state_read_header() if self.state == "read_data": return self.state_read_data() # Unknown state ? return False def collect_incoming_data(self, data): self.io.write(data)
gpl-3.0
3,332,516,228,514,705,400
15.123134
74
0.585744
false
pandysong/dxf2kicad_mod
dxf2kicad_mod.py
1
4880
# refer to http://pythonhosted.org/dxfgrabber/#
# Note that there must not be overlapping lines or shapes

import sys
import math
import functools
from itertools import groupby

import dxfgrabber

import kicad_mod_format as kf


def _arc_point(center, radius, angle_degree):
    ''' point defined by arc center, radius, and angle in degrees
    '''
    return (center[0] + radius * math.cos(angle_degree/180*math.pi),
            center[1] + radius * math.sin(angle_degree/180*math.pi))


def _endpoints(entity):
    ''' return a tuple of start and end points of the entity
    '''
    if "LINE" == entity.dxftype:
        return (entity.start, entity.end)
    elif "ARC" == entity.dxftype:
        return (_arc_point(entity.center, entity.radius, entity.start_angle),
                _arc_point(entity.center, entity.radius, entity.end_angle))
    else:
        raise TypeError(
            "[Error]: Unexpected dxftype {}".format(entity.dxftype))


def _touched(p1, p2):
    distance_error = 1e-2
    return ((math.fabs(p1[0]-p2[0]) < distance_error) and
            (math.fabs(p1[1]-p2[1]) < distance_error))


def _points_in_entity(ety):
    if 'LINE' == ety.dxftype:
        return [ety.start, ety.end]
    elif 'ARC' == ety.dxftype:
        if (ety.start_angle > ety.end_angle):
            ety.end_angle += 360

        def angles(start_angle, end_angle, radius):
            ''' yields discrete angles with step length defined by radius
            '''
            step = 1.0/ety.radius  # larger radius indicates smaller steps
            angle = start_angle
            while True:
                yield angle
                if (angle + step > ety.end_angle):
                    yield end_angle
                    break
                else:
                    angle += step

        return [_arc_point(ety.center, ety.radius, a)
                for a in angles(ety.start_angle, ety.end_angle, ety.radius)]
    else:
        raise TypeError(
            "[Error]: Unexpected dxftype {}".format(ety.dxftype))


def fp_polys(layer, entities):
    ''' yields fp_poly cmd in the layer of `entities`
    '''
    entities = list(entities)

    def _points_next_to(next_start):
        for e in entities:
            start, end = _endpoints(e)
            pts = _points_in_entity(e)
            if _touched(next_start, start):
                return pts, e
            elif _touched(next_start, end):
                pts.reverse()
                return pts, e
        return None, None

    def poly(e):
        start, next_start = _endpoints(e)
        yield [start]  # yield start points
        while True:
            pts, pts_e = _points_next_to(next_start)
            if pts:
                entities.remove(pts_e)  # remove from the set
                yield pts  # yield a list of points
                next_start = pts[-1]  # new start
            else:
                if _touched(next_start, start):
                    return
                else:
                    raise ValueError('Unclosed shape at {}'.format(next_start))

    def polys():
        while True:
            if not entities:
                return
            e = entities.pop()  # pick up one
            yield poly(e)  # yield an iterator which yields points

    for p in polys():
        poly_points = functools.reduce(lambda x, y: x+y, p)
        # we may use *point, but since there might be more than 3 values in one
        # point, we unpack it manually
        yield kf.fp_poly(children=(kf.pts(children=(kf.xy(point[0], point[1])
                                                    for point in poly_points)),
                                   kf.layer(layer),
                                   kf.width(0.001)))


def _layer_entities(entities):
    seq = list(entities)
    seq.sort(key=lambda e: e.layer)
    groups = groupby(seq, lambda e: e.layer)
    return groups


def cmds_from_entities(entities):
    ''' get all cmd (in kicad_mod_format) from entities which is
    the entities on all layers.
    '''
    # group the entities that were passed in (rather than reaching for the
    # global `dxf`), so the function works for any entity collection
    return functools.reduce(lambda x, y: x+y,
                            (list(fp_polys(layer, layer_entities))
                             for (layer, layer_entities)
                             in _layer_entities(entities)))


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('usage:\n'
              '  save to a file:  python {} '
              'inputfile.dxf > outputfile.kicad_mod\n'
              '  print to stdout: python {} inputfile.dxf'.format(
                  sys.argv[0], sys.argv[0]))
    else:
        dxf = dxfgrabber.readfile(sys.argv[1])
        print(str(kf.Module('autogenerated',
                            children=cmds_from_entities(dxf.entities))))
gpl-3.0
8,028,408,619,376,183,000
31.197279
79
0.518648
false
cidles/poio-api
src/poioapi/io/graf.py
1
18028
# -*- coding: utf-8 -*- # # Poio Tools for Linguists # # Copyright (C) 2009-2013 Poio Project # Author: António Lopes <[email protected]> # URL: <http://media.cidles.eu/poio/> # For license information, see LICENSE.TXT """ This document contain the responsible methods to write and parse the GrAF files. The parser use the ContentHandler from SAX Xml module. """ from __future__ import absolute_import, unicode_literals import abc import codecs import os from xml.etree.ElementTree import tostring from xml.dom import minidom import graf # GrAF ID's separator GRAFSEPARATOR = ".." (TEXT, AUDIO, VIDEO, NONE) = ("text", "audio", "video", "none") class Tier: """A list of tiers. The name is the tier unique identification. """ __slots__ = ['name', 'annotation_space'] def __init__(self, name, annotation_space=None): self.name = name self.annotation_space = annotation_space class Annotation: """A list of annotations. The id is the annotation identification, the value the annotation value and the features are a dict type of values containing the annotation features. """ __slots__ = ['id', 'value', 'features'] def __init__(self, id, value, features=None): self.value = value self.id = id self.features = features class NodeId: """A list of nodes using a specific format. The prefix is the node type and the index the identification number. """ __slots__ = ['prefix', 'index'] def __init__(self, prefix, index): self.prefix = prefix self.index = str(index) def to_str(self): return "{0}{1}n{2}".format(self.prefix, GRAFSEPARATOR, self.index) def str_edge(self): return "e{0}".format(self.index) def str_region(self): return "{0}{1}r{2}".format(self.prefix, GRAFSEPARATOR, self.index) class PrimaryData: """This class represents the primary data of an AnnotationGraph object. """ def __init__(self): self.type = None self.external_link = None self.filename = None self.content = None class BaseParser(object): """This class is a base class to the parser classes in order to create GrAF objects. This class contains some methods that must be implemented other wise it will be raise a exception error. Although the methods that should be implemented with properly code are the get_root_tiers, get_child_tiers_for_tier and get_annotations_for_tier. The method tier_has_regions and region_for_annotation could simply return None or pass. Raises ------ NotImplementedError Method must be implemented. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def get_root_tiers(self): """Method to get the root tiers. The root tiers are defined by the parser when the method is implemented. Returns ------- list : array-like List of tiers type. """ raise NotImplementedError("Method must be implemented") @abc.abstractmethod def get_child_tiers_for_tier(self, tier): """Method that get the child tiers of a specific tier. Parameters ---------- tier : object Tier object. Returns ------- list : array-like List of tiers type. See also -------- Tier """ raise NotImplementedError("Method must be implemented") @abc.abstractmethod def get_annotations_for_tier(self, tier, annotation_parent=None): """Method that get all the annotations for a specific tier. The annotations can be filtered using an annotation parent. Parameters ---------- tier : object Tier object. annotation_parent : object Annotation object. Returns ------- list : array-like List of annotations type. See also -------- Tier, Annotation """ raise NotImplementedError("Method must be implemented") @abc.abstractmethod def tier_has_regions(self, tier): """Method to verify if a tier has regions. 
Parameters ---------- tier : object Tier object. Returns ------- has_region : bool A true or false variable. See also -------- Tier """ raise NotImplementedError("Method must be implemented") @abc.abstractmethod def region_for_annotation(self, annotation): """Method to get the regions values of a specific annotation. Parameters ---------- annotation : object Annotation object. Returns ------- regions : tuple A tuple with the two regions. See also -------- Annotation """ raise NotImplementedError("Method must be implemented") @abc.abstractmethod def get_primary_data(self): """Method to get the primary data of the GrAF file. Returns ------- primaryData : object Object type of PrimaryData class. See also -------- PrimaryData """ raise NotImplementedError("Method must be implemented") class BaseWriter(object): """This class is a base class to the writer classes in order to create files from GrAF objects. This class contains some methods that must be implemented other wise it will be raise a exception error. Raises ------ NotImplementedError Method must be implemented. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def write(self, outputfile, converter): """Method that will write the GrAF object into a specific format. Parameters ---------- outputfile : str The filename of the output file. The filename should be the header file for GrAF with the extension ".hdr". converter : Converter or AnnotationGraph A converter object. The converter object containes the data that will be use for output. All writers need at least a GrAF graph and the tier hierarchy, some will also need the primary data object. """ raise NotImplementedError("Method must be implemented") class GrAFConverter: """This class handles the conversion of different file formats into GrAF objects and back again. It uses a sub-class of BaseParser to get the annotations and the tier hierarchies. A sub-class of BaseWriter is used to write back the files. Please be aware that meta-data might get lost if you write to a file format from another one. This depends on whether the output file format can store all meta-data from the input file format. In any case all the data and annotation will be stored. """ def __init__(self, parser, writer=None): self.parser = parser self.writer = writer self.graf = graf.Graph() self.tier_hierarchies = [] self.meta_information = None self.primary_data = None self.original_file = None def write(self, outputfile): if self.writer: self.writer.write(outputfile, self) def parse(self): """This method will be the responsible to transform the parser into a GrAF object. This method also retrieves the tiers hierarchies. 
""" self._tiers_parent_list = [] self.root_tiers = [] tiers_hierarchy_map = {} for tier in self.parser.get_root_tiers(): self.root_tiers.append(tier.name) self._convert_tier(tier, None, None) i = 0 for t in self._tiers_parent_list: if t[1] is None: i += 1 tiers_hierarchy_map[str(i)] = [t[0]] else: self._append_tier_to_hierarchy(tiers_hierarchy_map[str(i)], t[1], t[0]) for i, hierarchy in tiers_hierarchy_map.items(): self.tier_hierarchies.append(hierarchy) if hasattr(self.parser, 'meta_information'): self.meta_information = self.parser.meta_information self.primary_data = self.parser.get_primary_data() if hasattr(self.parser, 'filepath') and \ isinstance(self.parser.filepath, str): self.original_file = os.path.abspath(self.parser.filepath) def _convert_tier(self, tier, parent_node, parent_annotation, parent_prefix=None): child_tiers = self.parser.get_child_tiers_for_tier(tier) if tier.annotation_space is None: prefix = tier.name annotation_name = prefix else: annotation_name = tier.annotation_space.replace(' ', '_') prefix = "{0}{1}{2}".format(annotation_name, GRAFSEPARATOR, tier.name) has_regions = False if self.parser.tier_has_regions(tier): has_regions = True self._add_tier_in_hierarchy_list(prefix, parent_prefix) annotations = self.parser.get_annotations_for_tier(tier, parent_annotation) for annotation in annotations: regions = None if has_regions: regions = self.parser.region_for_annotation(annotation) node_id = NodeId(prefix, annotation.id) self._add_node(node_id, annotation, annotation_name, regions, parent_node) self._add_root_nodes(prefix, node_id) if child_tiers: for t in child_tiers: self._convert_tier(t, node_id, annotation, prefix) if annotations == [] and child_tiers: for t in child_tiers: self._convert_tier(t, None, None, prefix) def _add_tier_in_hierarchy_list(self, prefix, parent_prefix): if not (prefix, parent_prefix) in self._tiers_parent_list: self._tiers_parent_list.append((prefix, parent_prefix)) def _append_tier_to_hierarchy(self, tiers_list, parent_tier, tier): for t in tiers_list: if isinstance(t, list): self._append_tier_to_hierarchy(t, parent_tier, tier) else: if t == parent_tier: tiers_list.append([tier]) def _add_node(self, node_id, annotation, annotation_name, regions, from_node_id): self._add_node_to_graph(node_id, regions, from_node_id) self._add_graf_annotation(annotation_name, annotation.id, node_id, annotation.value, annotation.features) def _add_root_nodes(self, prefix, node_id): if prefix in self.root_tiers: self.graf.header.roots.append(node_id.to_str()) def _add_graf_annotation(self, annotation_name, annotation_id, annotation_ref, annotation_value, annotation_features=None): annotation = graf.Annotation(annotation_name, annotation_features, annotation_id) if annotation_value is not None: annotation.features['annotation_value'] = annotation_value self.graf.nodes[annotation_ref.to_str()].annotations.add(annotation) if annotation_name in self.graf.annotation_spaces: #if annotation not in self.graf.annotation_spaces[annotation_name]: self.graf.annotation_spaces[annotation_name].add(annotation) else: annotation_space = graf.AnnotationSpace(annotation_name) annotation_space.add(annotation) self.graf.annotation_spaces.add(annotation_space) def _add_node_to_graph(self, node_id, regions=None, from_node_id=None): node = graf.Node(node_id.to_str()) if from_node_id is not None: edge_id = node_id.str_edge() self.graf.create_edge(self.graf.nodes[from_node_id.to_str()], node, edge_id) if regions is not None: region_id = node_id.str_region() region = 
graf.Region(region_id, *regions)
            node.add_region(region)

            self.graf.regions.add(region)

        self.graf.nodes.add(node)


class Writer(BaseWriter):

    def __init__(self, **kwargs):
        self.tier_hierarchies = None
        self.meta_information = None
        self.standoffheader = graf.StandoffHeader(**kwargs)

    def _flatten_hierarchy_elements(self, elements):
        """Flatten the given hierarchy elements into a new list of elements.

        Parameters
        ----------
        elements : array_like
            An array of string values.

        Returns
        -------
        flat_elements : array_like
            An array of flattened `elements`.

        """

        flat_elements = []
        for e in elements:
            if type(e) is list:
                flat_elements.extend(self._flatten_hierarchy_elements(e))
            else:
                flat_elements.append(e)
        return flat_elements

    def write(self, outputfile, ag):
        """Writes an AnnotationGraph object as GrAF files.

        Parameters
        ----------
        outputfile : str
            The filename of the output file. The filename should be the header
            file for GrAF with the extension ".hdr".
        ag : poioapi.annotationgraph.AnnotationGraph
            An AnnotationGraph object. The AG object contains the data that
            will be used for output.

        """

        (basedirname, _) = os.path.splitext(outputfile)

        self._get_parents(ag.tier_hierarchies)

        standoffrenderer = graf.StandoffHeaderRenderer("{0}.hdr".format(
            basedirname))

        for tier_name in self._flatten_hierarchy_elements(
                ag.tier_hierarchies):
            annotation_space = tier_name.split(GRAFSEPARATOR)[0]
            out_graf = graf.Graph()
            renderer = graf.GrafRenderer("{0}-{1}.xml".format(
                basedirname, annotation_space
            ))
            out_graf.nodes = [n for n in ag.graf.nodes
                              if n.id.startswith(tier_name)]
            out_graf.edges = [e for e in ag.graf.edges
                              if e.to_node.id.startswith(tier_name)]
            out_graf.regions = [r for r in ag.graf.regions
                                if r.id.startswith(tier_name)]
            out_graf.annotation_spaces.add(graf.AnnotationSpace(
                annotation_space))
            out_graf.header.add_dependency(self._parent[tier_name])
            out_graf = self._add_root_nodes(ag.graf, annotation_space,
                                            out_graf)

            renderer.render(out_graf)

            basename = os.path.basename(basedirname)
            self.standoffheader.datadesc.add_annotation(
                "{0}-{1}.xml".format(basename, annotation_space),
                annotation_space)

        self._add_primary_data(ag.primary_data, basedirname)
        standoffrenderer.render(self.standoffheader)
        self._generate_metafile(basedirname, ag.meta_information)

    def _add_root_nodes(self, graph, annotation_space, out_graf):
        for root in graph.header.roots:
            if annotation_space in root:
                out_graf.header.roots.append(root)

        return out_graf

    def _get_parents(self, tier_hierarchies):
        self._parent = {}

        for h in tier_hierarchies:
            self._get_hierarchy_parents(h, None)

    def _get_hierarchy_parents(self, hierarchy, parent):
        for i, h in enumerate(hierarchy):
            if isinstance(h, list):
                self._get_hierarchy_parents(h, parent)
            else:
                self._parent[h] = parent

                if i == 0:
                    parent = h.split(GRAFSEPARATOR)[0]

    def _add_primary_data(self, primary_data, basedirname):
        if primary_data.external_link:
            loc = primary_data.external_link
        elif primary_data.content:
            loc = self._create_raw_txt_file(primary_data.content, basedirname)
        elif primary_data.filename:
            loc = primary_data.filename

        self.standoffheader.datadesc.primaryData = {'loc': loc,
                                                    'f.id': primary_data.type}

    def _create_raw_txt_file(self, content, basedirname):
        filename = "{0}.txt".format(os.path.splitext(basedirname)[0])
        file = os.path.abspath(filename)

        f = codecs.open(file, 'w', 'utf-8')
        f.write(content)
        f.close()

        return os.path.basename(filename)

    def _generate_metafile(self, basedirname, meta_information=None):
        """Generate a metafile with all the extra information extracted from
        a file when it is parsed.
        Parameters
        ----------
        basedirname : str
            Base name of the input file.
        meta_information: ElementTree
            ElementTree with the extra information.

        """

        if meta_information is not None:
            out = open("{0}-extinfo.xml".format(basedirname), "wb")
            doc = minidom.parseString(tostring(meta_information,
                                               encoding="utf-8"))
            out.write(doc.toprettyxml(encoding='utf-8'))
            out.close()
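
# A minimal usage sketch of the conversion pipeline defined above. It assumes a
# concrete BaseParser subclass exists elsewhere; the name `MyFormatParser` below
# is hypothetical and not part of this module:
#
#     parser = MyFormatParser("corpus.myformat")
#     converter = GrAFConverter(parser, Writer())
#     converter.parse()              # builds converter.graf and converter.tier_hierarchies
#     converter.write("corpus.hdr")  # renders the ".hdr" standoff header plus one
#                                    # "corpus-<annotation space>.xml" file per space
#
# parse() walks the root tiers returned by the parser, creates one GrAF node per
# annotation, and records the tier hierarchy; write() then hands the converter
# itself to the Writer, which reads its graf, tier_hierarchies, primary_data and
# meta_information attributes.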
apache-2.0
-3,918,818,425,620,900,400
28.818803
80
0.568758
false
brain-research/mirage-rl-qprop
sandbox/rocky/tf/q_functions/continuous_mlp_q_function.py
1
6100
from sandbox.rocky.tf.q_functions.base import QFunction
import numpy as np
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.core.layers import batch_norm
from sandbox.rocky.tf.policies.base import StochasticPolicy
from sandbox.rocky.tf.misc import tensor_utils
import tensorflow as tf
import sandbox.rocky.tf.core.layers as L


class ContinuousMLPQFunction(QFunction, LayersPowered, Serializable):
    def __init__(
            self,
            env_spec,
            name='qnet',
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            action_merge_layer=-2,
            output_nonlinearity=None,
            eqf_use_full_qf=False,
            eqf_sample_size=1,
            mqprop=False,
            bn=False):
        Serializable.quick_init(self, locals())

        assert not env_spec.action_space.is_discrete
        self._env_spec = env_spec

        with tf.variable_scope(name):
            l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
            l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")

            n_layers = len(hidden_sizes) + 1

            if n_layers > 1:
                action_merge_layer = \
                    (action_merge_layer % n_layers + n_layers) % n_layers
            else:
                action_merge_layer = 1

            l_hidden = l_obs

            for idx, size in enumerate(hidden_sizes):
                if bn:
                    l_hidden = batch_norm(l_hidden)

                if idx == action_merge_layer:
                    l_hidden = L.ConcatLayer([l_hidden, l_action])

                l_hidden = L.DenseLayer(
                    l_hidden,
                    num_units=size,
                    nonlinearity=hidden_nonlinearity,
                    name="h%d" % (idx + 1)
                )

            if action_merge_layer == n_layers:
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_output = L.DenseLayer(
                l_hidden,
                num_units=1,
                nonlinearity=output_nonlinearity,
                name="output"
            )

            output_var = L.get_output(l_output, deterministic=True)
            output_var = tf.reshape(output_var, (-1,))

            self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], output_var)
            self._output_layer = l_output
            self._obs_layer = l_obs
            self._action_layer = l_action
            self._output_nonlinearity = output_nonlinearity

            self.eqf_use_full_qf = eqf_use_full_qf
            self.eqf_sample_size = eqf_sample_size
            self.mqprop = mqprop

            LayersPowered.__init__(self, [l_output])

    def get_qval(self, observations, actions):
        return self._f_qval(observations, actions)

    def get_qval_sym(self, obs_var, action_var, **kwargs):
        qvals = L.get_output(
            self._output_layer,
            {self._obs_layer: obs_var, self._action_layer: action_var},
            **kwargs
        )
        return tf.reshape(qvals, (-1,))

    def get_e_qval(self, observations, policy):
        if isinstance(policy, StochasticPolicy):
            agent_info = policy.dist_info(observations)
            means, log_stds = agent_info['mean'], agent_info['log_std']
            if self.eqf_use_full_qf and self.eqf_sample_size > 1:
                observations = np.repeat(observations, self.eqf_sample_size, axis=0)
                means = np.repeat(means, self.eqf_sample_size, axis=0)
                stds = np.repeat(np.exp(log_stds), self.eqf_sample_size, axis=0)
                # sample standard normal noise with the same shape as the means
                randoms = np.random.randn(*means.shape)
                actions = means + stds * randoms
                all_qvals = self.get_qval(observations, actions)
                qvals = np.mean(all_qvals.reshape((-1, self.eqf_sample_size)), axis=1)
            else:
                qvals = self.get_qval(observations, means)
        else:
            actions, _ = policy.get_actions(observations)
            qvals = self.get_qval(observations, actions)
        return qvals

    def _get_e_qval_sym(self, obs_var, policy, **kwargs):
        if isinstance(policy, StochasticPolicy):
            agent_info = policy.dist_info_sym(obs_var)
            mean_var, log_std_var = agent_info['mean'], agent_info['log_std']
            if self.eqf_use_full_qf:
                assert self.eqf_sample_size > 0
                if self.eqf_sample_size == 1:
                    action_var = tf.random_normal(shape=tf.shape(mean_var))*tf.exp(log_std_var) + mean_var
                    return 
self.get_qval_sym(obs_var, action_var, **kwargs), action_var else: raise NotImplementedError else: return self.get_qval_sym(obs_var, mean_var, **kwargs), mean_var else: action_var = policy.get_action_sym(obs_var) return self.get_qval_sym(obs_var, action_var, **kwargs), action_var def get_e_qval_sym(self, obs_var, policy, **kwargs): return self._get_e_qval_sym(obs_var, policy, **kwargs)[0] def get_cv_sym(self, obs_var, action_var, policy, **kwargs): if self.eqf_use_full_qf: qvals = self.get_qval_sym(obs_var, action_var, deterministic=True, **kwargs) e_qvals = self.get_e_qval_sym(obs_var, policy, deterministic=True, **kwargs) return qvals - e_qvals else: if self.mqprop: # Just use zero-order Taylor expansion (aka just the constant qvals) qvals, action0 = self._get_e_qval_sym(obs_var, policy, deterministic=True, **kwargs) return qvals else: qvals, action0 = self._get_e_qval_sym(obs_var, policy, deterministic=True, **kwargs) # Use first-order Taylor expansion qprimes = tf.gradients(qvals, action0)[0] deltas = action_var - action0 return tf.reduce_sum(deltas * qprimes, 1)
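
# Summary of the three control-variate branches implemented by get_cv_sym()
# above (a notation sketch, not additional functionality):
#
#   - eqf_use_full_qf:  CV(s, a) = Q(s, a) - E_{a'~pi}[Q(s, a')]
#                       (the expectation is approximated by sampling)
#   - mqprop:           CV(s, a) = Q(s, mu(s))          (zero-order term only)
#   - default (Q-Prop): CV(s, a) = grad_a Q(s, a)|_{a=mu(s)} . (a - mu(s))
#
# The default branch is the first-order Taylor term of Q around the policy mean
# mu(s): `qprimes` is dQ/da evaluated at a = mu(s), `deltas` is a - mu(s), and
# `tf.reduce_sum(deltas * qprimes, 1)` forms their inner product per sample.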
mit
-6,248,511,351,676,138,000
40.216216
107
0.562787
false
blockstack/blockstack-server
integration_tests/blockstack_integration_tests/scenarios/name_import_expire_pre_reg_expire_pay2ns_multi.py
1
8928
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
    Blockstack
    ~~~~~
    copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
    copyright: (c) 2016 by Blockstack.org

    This file is part of Blockstack

    Blockstack is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    Blockstack is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""

# activate F-day 2017
"""
TEST ENV BLOCKSTACK_EPOCH_1_END_BLOCK 682
TEST ENV BLOCKSTACK_EPOCH_2_END_BLOCK 683
TEST ENV BLOCKSTACK_EPOCH_2_NAMESPACE_LIFETIME_MULTIPLIER 1
TEST ENV BLOCKSTACK_EPOCH_3_NAMESPACE_LIFETIME_MULTIPLIER 1
TEST ENV BLOCKSTACK_EPOCH_3_NAMESPACE_LIFETIME_GRACE_PERIOD 0
TEST ENV BLOCKSTACK_EPOCH_3_NAMESPACE_RECEIVE_FEES_PERIOD 22
"""

import json
import testlib
import virtualchain
import blockstack

wallets = [
    testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
    testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
    testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
    testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
    testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 )
]

consensus = "17ac43c1d8549c3181b200f1bf97eb7d"

def scenario( wallets, **kw ):

    testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
    testlib.next_block( **kw )

    testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 3, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey, version_bits=2)
    testlib.next_block( **kw )

    resp = testlib.blockstack_name_import( "foo.test", wallets[3].addr, "11" * 20, wallets[1].privkey )
    if 'error' in resp:
        print json.dumps( resp, indent=4 )
        return False

    testlib.next_block( **kw )

    testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
    testlib.next_block( **kw )

    namespace_rec = testlib.blockstack_cli_get_namespace_blockchain_record("test")
    if 'error' in namespace_rec:
        print namespace_rec
        return False

    namespace_balance = testlib.get_balance(namespace_rec['address'])
    burn_balance = testlib.get_balance(blockstack.lib.config.BLOCKSTACK_BURN_ADDRESS)

    testlib.next_block( **kw )
    testlib.next_block( **kw )
    testlib.next_block( **kw )
    testlib.next_block( **kw )    # expired

    res = testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr )    # +name_cost
    if 'error' in res:
        print res
        return False

    testlib.next_block( **kw )

    res = testlib.blockstack_name_register( "foo.test", wallets[2].privkey, wallets[3].addr )
    if 'error' in res:
        print res
        return False

    testlib.next_block( **kw )
    testlib.next_block( **kw )
    testlib.next_block( **kw )
    testlib.next_block( **kw )
    testlib.next_block( **kw )    # expired

    res = testlib.blockstack_name_preorder( "foo.test", wallets[3].privkey, wallets[4].addr )    # +name_cost
    if 'error' in res:
        print res
        return False

    testlib.next_block( **kw )

    res = testlib.blockstack_name_register( "foo.test", wallets[3].privkey, wallets[4].addr )
    if 'error' in res:
        print res
        return False

    testlib.next_block( **kw )
    testlib.next_block( **kw )

    res = 
testlib.blockstack_name_renew("foo.test", wallets[4].privkey) # +name_cost if 'error' in res: print res return False testlib.next_block( **kw ) testlib.next_block( **kw ) testlib.next_block( **kw ) testlib.next_block( **kw ) testlib.next_block( **kw ) # expired res = testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr ) # +name_cost if 'error' in res: print res return False testlib.next_block( **kw ) res = testlib.blockstack_name_register( "foo.test", wallets[2].privkey, wallets[3].addr ) if 'error' in res: print res return False testlib.next_block( **kw ) new_namespace_balance = testlib.get_balance(namespace_rec['address']) name_rec = testlib.get_name_blockchain_record('foo.test') name_cost = name_rec['op_fee'] testlib.next_block( **kw ) testlib.next_block( **kw ) # stop fee collection testlib.next_block( **kw ) testlib.next_block( **kw ) # expired if new_namespace_balance - namespace_balance != 4*name_cost: print 'address {} did not get credited'.format(namespace_rec['address']) print '{} != {} + 4*{}'.format(new_namespace_balance, namespace_balance, name_cost) return False # preorder should send to the null burn address now. res = testlib.blockstack_name_preorder( "foo2.test", wallets[4].privkey, wallets[0].addr ) # does not pay to namespace if 'error' in res: print res return False # try forcing it to the namespace burn address, to verify that it fails res = testlib.blockstack_name_preorder( "foo_fail.test", wallets[4].privkey, wallets[0].addr, burn_addr=namespace_rec['address'], expect_fail=True ) # does not pay to namespace (command fails) if 'error' not in res: print res return False res = testlib.blockstack_name_preorder( "foo_fail.test", wallets[4].privkey, wallets[0].addr, burn_addr=namespace_rec['address'], price={'units': 'BTC', 'amount': name_cost}, safety_checks=False, tx_fee=10000*5 ) # +name_cost if 'error' in res: print res return False testlib.next_block( **kw ) testlib.expect_snv_fail_at('foo_fail.test', testlib.get_current_block(**kw)) # should be accepted res = testlib.blockstack_name_register( "foo2.test", wallets[4].privkey, wallets[0].addr ) if 'error' in res: print res return False # should be rejected res = testlib.blockstack_name_register( "foo_fail.test", wallets[4].privkey, wallets[0].addr, safety_checks=False ) if 'error' in res: print res return False testlib.next_block( **kw ) testlib.expect_snv_fail_at('foo_fail.test', testlib.get_current_block(**kw)) # should have been rejected due to wrong burn address whois = testlib.blockstack_cli_whois('foo_fail.test') if 'error' not in whois: print whois return False new_burn_balance = testlib.get_balance(blockstack.lib.config.BLOCKSTACK_BURN_ADDRESS) new_namespace_balance = testlib.get_balance(namespace_rec['address']) name_rec_2 = testlib.get_name_blockchain_record('foo2.test') name_cost_2 = name_rec_2['op_fee'] # namespace should NOT have gotten the fee for foo_fail. 
It should only have gotten it for foo.test if new_namespace_balance - namespace_balance < 5*name_cost or new_namespace_balance - namespace_balance > 6*name_cost: print 'address {} got credited after fee capture period'.format(namespace_rec['address']) print '{} != {} + 5*{}'.format(new_namespace_balance, namespace_balance, name_cost) return False # burn address should have received the fee for the second name if new_burn_balance - name_cost_2 != burn_balance: print 'null burn address did not get credited' print '{} != {} + {}'.format(new_burn_balance, burn_balance, name_cost_2) return False def check( state_engine ): # not revealed, but ready ns = state_engine.get_namespace_reveal( "test" ) if ns is not None: print "namespace reveal exists" return False ns = state_engine.get_namespace( "test" ) if ns is None: print "no namespace" return False if ns['namespace_id'] != 'test': print "wrong namespace" return False for name in ['foo2.test']: # not preordered preorder = state_engine.get_name_preorder( name, virtualchain.make_payment_script(wallets[4].addr), wallets[0].addr ) if preorder is not None: print "preorder exists" return False # registered name_rec = state_engine.get_name( name ) if name_rec is None: print "name does not exist" return False # owned by if name_rec['address'] != wallets[0].addr or name_rec['sender'] != virtualchain.make_payment_script(wallets[0].addr): print "sender is wrong" return False return True
gpl-3.0
-6,916,285,018,192,869,000
35.292683
230
0.660506
false
diefenbach/django-cba
cba/layouts.py
1
1576
from . base import Component


class Grid(Component):
    """A CSS grid layout. A grid consists of arbitrary rows with up to 16
       columns per row.

       See http://semantic-ui.com/collections/grid.html for more.
    """
    template = "cba/layouts/grid.html"


class Column(Component):
    """A column of a grid.

        width
            The width of the column. Valid values are 1-16. A row consists
            of at most 16 columns but can be ended explicitly.
    """
    template = "cba/layouts/column.html"

    # TODO: This needs to be moved out of Python, to be independent of the
    # UI system in use.
    # Index 0 is only a placeholder; valid widths are 1-16.
    WIDTH = ["love", "one", "two", "three", "four", "five", "six", "seven",
             "eight", "nine", "ten", "eleven", "twelve", "thirteen",
             "fourteen", "fifteen", "sixteen"]

    def __init__(self, id=None, width=16, *args, **kwargs):
        super(Column, self).__init__(id, *args, **kwargs)
        self.width = self.WIDTH[width]


class Row(Component):
    """A row of a grid. It can be used to end a row explicitly.
    """
    template = "cba/layouts/row.html"


class Split(Component):
    """Splits the screen into two or more panels. Each direct sub-component
       is placed into its own panel. Split components can be nested.

        direction
            The direction of the splitting. One of ``vertical`` or
            ``horizontal``.
    """
    template = "cba/layouts/split.html"

    def __init__(self, id=None, direction="vertical", *args, **kwargs):
        super(Split, self).__init__(id, *args, **kwargs)
        self.direction = direction
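
# A rough usage sketch of the layout components above. How children are
# attached depends on the base Component class (not shown in this file), so
# the `initial_components` keyword below is only an assumption for
# illustration:
#
#     grid = Grid(
#         id="main-grid",
#         initial_components=[
#             Column(id="left", width=4),    # maps to the "four" CSS class
#             Column(id="right", width=12),  # maps to the "twelve" CSS class
#             Row(id="end-of-row"),          # ends the 16-column row explicitly
#         ],
#     )
#
# Split(direction="horizontal") would instead divide the screen into one panel
# per direct sub-component.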
bsd-3-clause
7,044,278,349,596,012,000
28.185185
107
0.607234
false
tvotyakov/codeeval
easy/one-zero-two-zeros/code.py
1
1832
#!python3 def count_of_zeros(num): ''' (int) -> int Returns count of zero bits in the binary representation of the given integer num. >>> count_of_zeros(0) 1 >>> count_of_zeros(1) 0 >>> count_of_zeros(2) 1 >>> count_of_zeros(4) 2 >>> count_of_zeros(6) 1 >>> count_of_zeros(7) 0 ''' if num == 0: return 1 i = 0 while num: i += num & 1 ^ 1 num >>= 1 return i def calc_num_with_zeros(zeros_count, max_num): ''' (int, int) -> int Returns count of integers from 1 to max_num including which contains zeros_count zeros in its binary representation. >>> calc_num_with_zeros(1, 1) 0 >>> calc_num_with_zeros(1, 2) 1 >>> calc_num_with_zeros(2, 4) 1 >>> calc_num_with_zeros(1, 8) 3 ''' return sum(1 for i in range(1, max_num + 1) if count_of_zeros(i) == zeros_count) def parse_input(in_str): ''' (string) -> tuple of integer Function expects string in_str which contains two integers delimited by space and returns tuple of the integers. >>> tuple(parse_input('1 1')) (1, 1) >>> tuple(parse_input('2 3')) (2, 3) >>> tuple(parse_input('1 8')) (1, 8) >>> tuple(parse_input('2 4')) (2, 4) ''' return map(int, in_str.split(' ')) if __name__ == '__main__': import sys if (len(sys.argv) <= 1): import doctest doctest.testmod() else: test_cases = open(sys.argv[1], 'r') for test in test_cases: test = test.rstrip('\n') if not test: continue # ignore an empty line print(calc_num_with_zeros(*parse_input(test))) test_cases.close()
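
# Worked example of the bit-counting trick above: `num & 1 ^ 1` parses as
# `(num & 1) ^ 1` because bitwise & binds tighter than ^, so it yields 1
# exactly when the lowest bit of num is 0. For num = 6 (binary 110) the loop
# inspects the bits 0, 1, 1 and therefore counts a single zero, matching the
# doctest `count_of_zeros(6) -> 1`.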
gpl-2.0
-1,047,781,085,113,638,800
18.606742
59
0.504367
false
jan-lugfl/lugfl-members
lugflmembers/wsgi.py
1
1443
""" WSGI config for lugflmembers project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "lugflmembers.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lugflmembers.local_settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
mit
-7,515,940,728,358,359,000
44.09375
79
0.795565
false
lekston/ardupilot
Tools/autotest/autotest.py
1
18930
#!/usr/bin/env python """ APM automatic test suite Andrew Tridgell, October 2011 """ from __future__ import print_function import atexit import fnmatch import glob import optparse import os import shutil import signal import sys import time import traceback from apmrover2 import * from arducopter import * from quadplane import * from arduplane import * from ardusub import * from pysim import util from pymavlink import mavutil from pymavlink.generator import mavtemplate def buildlogs_dirpath(): return os.getenv("BUILDLOGS", util.reltopdir("../buildlogs")) def buildlogs_path(path): '''return a string representing path in the buildlogs directory''' bits = [buildlogs_dirpath()] if isinstance(path, list): bits.extend(path) else: bits.append(path) return os.path.join(*bits) def get_default_params(atype, binary): """Get default parameters.""" # use rover simulator so SITL is not starved of input HOME = mavutil.location(40.071374969556928, -105.22978898137808, 1583.702759, 246) if "plane" in binary or "rover" in binary: frame = "rover" else: frame = "+" home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading) sitl = util.start_SITL(binary, wipe=True, model=frame, home=home, speedup=10, unhide_parameters=True) mavproxy = util.start_MAVProxy_SITL(atype) print("Dumping defaults") idx = mavproxy.expect(['Please Run Setup', 'Saved [0-9]+ parameters to (\S+)']) if idx == 0: # we need to restart it after eeprom erase util.pexpect_close(mavproxy) util.pexpect_close(sitl) sitl = util.start_SITL(binary, model=frame, home=home, speedup=10) mavproxy = util.start_MAVProxy_SITL(atype) idx = mavproxy.expect('Saved [0-9]+ parameters to (\S+)') parmfile = mavproxy.match.group(1) dest = buildlogs_path('%s-defaults.parm' % atype) shutil.copy(parmfile, dest) util.pexpect_close(mavproxy) util.pexpect_close(sitl) print("Saved defaults for %s to %s" % (atype, dest)) return True def build_all(): """Run the build_all.sh script.""" print("Running build_all.sh") if util.run_cmd(util.reltopdir('Tools/scripts/build_all.sh'), directory=util.reltopdir('.')) != 0: print("Failed build_all.sh") return False return True def build_binaries(): """Run the build_binaries.py script.""" print("Running build_binaries.py") # copy the script as it changes git branch, which can change the script while running orig = util.reltopdir('Tools/scripts/build_binaries.py') copy = util.reltopdir('./build_binaries.py') shutil.copy2(orig, copy) # also copy generate_manifest library: orig_gm = util.reltopdir('Tools/scripts/generate_manifest.py') copy_gm = util.reltopdir('./generate_manifest.py') shutil.copy2(orig_gm, copy_gm) if util.run_cmd(copy, directory=util.reltopdir('.')) != 0: print("Failed build_binaries.py") return False return True def build_devrelease(): """Run the build_devrelease.sh script.""" print("Running build_devrelease.sh") # copy the script as it changes git branch, which can change the script while running orig = util.reltopdir('Tools/scripts/build_devrelease.sh') copy = util.reltopdir('./build_devrelease.sh') shutil.copy2(orig, copy) if util.run_cmd(copy, directory=util.reltopdir('.')) != 0: print("Failed build_devrelease.sh") return False return True def build_examples(): """Build examples.""" for target in 'px4-v2', 'navio': print("Running build.examples for %s" % target) try: util.build_examples(target) except Exception as e: print("Failed build_examples on board=%s" % target) print(str(e)) return False return True def build_parameters(): """Run the param_parse.py script.""" print("Running param_parse.py") for vehicle 
in 'ArduPlane', 'ArduCopter', 'ArduSub', 'APMrover2', 'AntennaTracker': if util.run_cmd([util.reltopdir('Tools/autotest/param_metadata/param_parse.py'), '--vehicle', vehicle], directory=util.reltopdir('.')) != 0: print("Failed param_parse.py (%s)" % vehicle) return False return True def convert_gpx(): """Convert any tlog files to GPX and KML.""" mavlog = glob.glob(buildlogs_path("*.tlog")) passed = True for m in mavlog: util.run_cmd(util.reltopdir("modules/mavlink/pymavlink/tools/mavtogpx.py") + " --nofixcheck " + m) gpx = m + '.gpx' kml = m + '.kml' try: util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx, kml)) except CalledProcessError as e: passed = False try: util.run_cmd('zip %s.kmz %s.kml' % (m, m)) except CalledProcessError as e: passed = False util.run_cmd("mavflightview.py --imagefile=%s.png %s" % (m, m)) return passed def test_prerequisites(): """Check we have the right directories and tools to run tests.""" print("Testing prerequisites") util.mkdir_p(buildlogs_dirpath()) return True def alarm_handler(signum, frame): """Handle test timeout.""" global results, opts try: results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout) util.pexpect_close_all() convert_gpx() write_fullresults() os.killpg(0, signal.SIGKILL) except Exception: pass sys.exit(1) def should_run_step(step): """See if a step should be skipped.""" for skip in skipsteps: if fnmatch.fnmatch(step.lower(), skip.lower()): return False return True __bin_names = { "ArduCopter" : "arducopter", "ArduPlane" : "arduplane", "APMrover2" : "ardurover", "AntennaTracker" : "antennatracker", "CopterAVC" : "arducopter-heli", "QuadPlane" : "arduplane", "ArduSub" : "ardusub" } def binary_path(step, debug=False): try: vehicle = step.split(".")[1] except Exception: return None if vehicle in __bin_names: binary_name = __bin_names[vehicle] else: # cope with builds that don't have a specific binary return None binary_basedir = "sitl" binary = util.reltopdir(os.path.join('build', binary_basedir, 'bin', binary_name)) if not os.path.exists(binary): if os.path.exists(binary + ".exe"): binary += ".exe" else: raise ValueError("Binary (%s) does not exist" % (binary,)) return binary def run_step(step): """Run one step.""" # remove old logs util.run_cmd('/bin/rm -f logs/*.BIN logs/LASTLOG.TXT') if step == "prerequisites": return test_prerequisites() build_opts = { "j": opts.j, "debug": opts.debug, "clean": not opts.no_clean, "configure": not opts.no_configure, } if step == 'build.ArduPlane': return util.build_SITL('bin/arduplane', **build_opts) if step == 'build.APMrover2': return util.build_SITL('bin/ardurover', **build_opts) if step == 'build.ArduCopter': return util.build_SITL('bin/arducopter', **build_opts) if step == 'build.AntennaTracker': return util.build_SITL('bin/antennatracker', **build_opts) if step == 'build.Helicopter': return util.build_SITL('bin/arducopter-heli', **build_opts) if step == 'build.ArduSub': return util.build_SITL('bin/ardusub', **build_opts) binary = binary_path(step, debug=opts.debug) if step.startswith("defaults"): vehicle = step[9:] return get_default_params(vehicle, binary) fly_opts = { "viewerip": opts.viewerip, "use_map": opts.map, "valgrind": opts.valgrind, "gdb": opts.gdb, "gdbserver": opts.gdbserver, } if opts.speedup is not None: fly_opts["speedup"] = opts.speedup if step == 'fly.ArduCopter': arducopter = AutoTestCopter(binary, frame=opts.frame, **fly_opts) return arducopter.autotest() if step == 'fly.CopterAVC': arducopter = AutoTestCopter(binary, 
**fly_opts) return arducopter.autotest_heli() if step == 'fly.ArduPlane': arduplane = AutoTestPlane(binary, **fly_opts) return arduplane.autotest() if step == 'fly.QuadPlane': quadplane = AutoTestQuadPlane(binary, **fly_opts) return quadplane.autotest() if step == 'drive.APMrover2': apmrover2 = AutoTestRover(binary, frame=opts.frame, **fly_opts) return apmrover2.autotest() if step == 'dive.ArduSub': ardusub = AutoTestSub(binary, **fly_opts) return ardusub.autotest() if step == 'build.All': return build_all() if step == 'build.Binaries': return build_binaries() if step == 'build.DevRelease': return build_devrelease() if step == 'build.Examples': return build_examples() if step == 'build.Parameters': return build_parameters() if step == 'convertgpx': return convert_gpx() raise RuntimeError("Unknown step %s" % step) class TestResult(object): """Test result class.""" def __init__(self, name, result, elapsed): self.name = name self.result = result self.elapsed = "%.1f" % elapsed class TestFile(object): """Test result file.""" def __init__(self, name, fname): self.name = name self.fname = fname class TestResults(object): """Test results class.""" def __init__(self): self.date = time.asctime() self.githash = util.run_cmd('git rev-parse HEAD', output=True, directory=util.reltopdir('.')).strip() self.tests = [] self.files = [] self.images = [] def add(self, name, result, elapsed): """Add a result.""" self.tests.append(TestResult(name, result, elapsed)) def addfile(self, name, fname): """Add a result file.""" self.files.append(TestFile(name, fname)) def addimage(self, name, fname): """Add a result image.""" self.images.append(TestFile(name, fname)) def addglob(self, name, pattern): """Add a set of files.""" for f in glob.glob(buildlogs_path(pattern)): self.addfile(name, os.path.basename(f)) def addglobimage(self, name, pattern): """Add a set of images.""" for f in glob.glob(buildlogs_path(pattern)): self.addimage(name, os.path.basename(f)) def write_webresults(results_to_write): """Write webpage results.""" t = mavtemplate.MAVTemplate() for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')): html = util.loadfile(h) f = open(buildlogs_path(os.path.basename(h)), mode='w') t.write(f, html, results_to_write) f.close() for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')): shutil.copy(f, buildlogs_path(os.path.basename(f))) def write_fullresults(): """Write out full results set.""" global results results.addglob("Google Earth track", '*.kmz') results.addfile('Full Logs', 'autotest-output.txt') results.addglob('DataFlash Log', '*-log.bin') results.addglob("MAVLink log", '*.tlog') results.addglob("GPX track", '*.gpx') # results common to all vehicles: vehicle_files = [ ('{vehicle} build log', '{vehicle}.txt'), ('{vehicle} code size', '{vehicle}.sizes.txt'), ('{vehicle} stack sizes', '{vehicle}.framesizes.txt'), ('{vehicle} defaults', '{vehicle}-defaults.parm'), ('{vehicle} core', '{vehicle}.core'), ('{vehicle} ELF', '{vehicle}.elf'), ] vehicle_globs = [('{vehicle} log', '{vehicle}-*.BIN'), ] for vehicle in 'ArduPlane','ArduCopter','APMrover2','AntennaTracker', 'ArduSub': subs = { 'vehicle': vehicle } for vehicle_file in vehicle_files: description = vehicle_file[0].format(**subs) filename = vehicle_file[1].format(**subs) results.addfile(description, filename) for vehicle_glob in vehicle_globs: description = vehicle_glob[0].format(**subs) glob = vehicle_glob[1].format(**subs) results.addglob(description, glob) results.addglob("CopterAVC log", 'CopterAVC-*.BIN') results.addfile("CopterAVC 
core", 'CopterAVC.core') results.addglob('APM:Libraries documentation', 'docs/libraries/index.html') results.addglob('APM:Plane documentation', 'docs/ArduPlane/index.html') results.addglob('APM:Copter documentation', 'docs/ArduCopter/index.html') results.addglob('APM:Rover documentation', 'docs/APMrover2/index.html') results.addglob('APM:Sub documentation', 'docs/ArduSub/index.html') results.addglobimage("Flight Track", '*.png') write_webresults(results) def check_logs(step): """Check for log files from a step.""" print("check step: ", step) if step.startswith('fly.'): vehicle = step[4:] elif step.startswith('drive.'): vehicle = step[6:] else: return logs = glob.glob("logs/*.BIN") for log in logs: bname = os.path.basename(log) newname = buildlogs_path("%s-%s" % (vehicle, bname)) print("Renaming %s to %s" % (log, newname)) shutil.move(log, newname) corefile = "core" if os.path.exists(corefile): newname = buildlogs_path("%s.core" % vehicle) print("Renaming %s to %s" % (corefile, newname)) shutil.move(corefile, newname) try: util.run_cmd('/bin/cp build/sitl/bin/* %s' % buildlogs_dirpath(), directory=util.reltopdir('.')) except Exception: print("Unable to save binary") def run_tests(steps): """Run a list of steps.""" global results passed = True failed = [] for step in steps: util.pexpect_close_all() t1 = time.time() print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime())) try: if run_step(step): results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1) print(">>>> PASSED STEP: %s at %s" % (step, time.asctime())) check_logs(step) else: print(">>>> FAILED STEP: %s at %s" % (step, time.asctime())) passed = False failed.append(step) results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1) except Exception as msg: passed = False failed.append(step) print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg)) traceback.print_exc(file=sys.stdout) results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1) check_logs(step) if not passed: print("FAILED %u tests: %s" % (len(failed), failed)) util.pexpect_close_all() write_fullresults() return passed if __name__ == "__main__": ############## main program ############# os.environ['PYTHONUNBUFFERED'] = '1' os.putenv('TMPDIR', util.reltopdir('tmp')) parser = optparse.OptionParser("autotest") parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)') parser.add_option("--list", action='store_true', default=False, help='list the available steps') parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to') parser.add_option("--map", action='store_true', default=False, help='show map') parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests') parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds') parser.add_option("--speedup", default=None, type='int', help='speedup to run the simulations at') parser.add_option("--valgrind", default=False, action='store_true', help='run ArduPilot binaries under valgrind') parser.add_option("--gdb", default=False, action='store_true', help='run ArduPilot binaries under gdb') parser.add_option("--debug", default=False, action='store_true', help='make built binaries debug binaries') parser.add_option("-j", default=None, type='int', help='build CPUs') parser.add_option("--frame", type='string', default=None, help='specify frame type') parser.add_option("--gdbserver", default=False, 
action='store_true', help='run ArduPilot binaries under gdbserver') parser.add_option("--no-clean", default=False, action='store_true', help='do not clean before building', dest="no_clean") parser.add_option("--no-configure", default=False, action='store_true', help='do not configure before building', dest="no_configure") opts, args = parser.parse_args() steps = [ 'prerequisites', 'build.All', 'build.Binaries', # 'build.DevRelease', 'build.Examples', 'build.Parameters', 'build.ArduPlane', 'defaults.ArduPlane', 'fly.ArduPlane', 'fly.QuadPlane', 'build.APMrover2', 'defaults.APMrover2', 'drive.APMrover2', 'build.ArduCopter', 'defaults.ArduCopter', 'fly.ArduCopter', 'build.Helicopter', 'fly.CopterAVC', 'build.AntennaTracker', 'build.ArduSub', 'defaults.ArduSub', 'dive.ArduSub', 'convertgpx', ] skipsteps = opts.skip.split(',') # ensure we catch timeouts signal.signal(signal.SIGALRM, alarm_handler) signal.alarm(opts.timeout) if opts.list: for step in steps: print(step) sys.exit(0) util.mkdir_p(buildlogs_dirpath()) lckfile = buildlogs_path('autotest.lck') print("lckfile=%s" % repr(lckfile)) lck = util.lock_file(lckfile) if lck is None: print("autotest is locked - exiting. lckfile=(%s)" % (lckfile,)) sys.exit(0) atexit.register(util.pexpect_close_all) if len(args) > 0: # allow a wildcard list of steps matched = [] for a in args: matches = [step for step in steps if fnmatch.fnmatch(step.lower(), a.lower())] if not len(matches): print("No steps matched {}".format(a)) sys.exit(1) matched.extend(matches) steps = matched # skip steps according to --skip option: steps_to_run = [ s for s in steps if should_run_step(s) ] results = TestResults() try: if not run_tests(steps_to_run): sys.exit(1) except KeyboardInterrupt: util.pexpect_close_all() sys.exit(1) except Exception: # make sure we kill off any children util.pexpect_close_all() raise
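
# Example invocations (a sketch based on the option parsing above; step names
# are matched with fnmatch-style wildcards):
#
#   Tools/autotest/autotest.py --list                  # print the available steps
#   Tools/autotest/autotest.py build.ArduCopter fly.ArduCopter
#   Tools/autotest/autotest.py --skip build.Examples "fly.*"
#
# Results are written under $BUILDLOGS (default ../buildlogs), including the
# HTML summary produced by write_webresults().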
gpl-3.0
-2,348,797,625,056,130,600
31.414384
148
0.609245
false