| repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
| no13bus/suited4you | settings.py | 1 | 1454 |
#!/usr/bin/env python
#coding=utf8
import os
settings = {
'gzip': True,
'autoescape': 'xhtml_escape',
'static_path':os.path.join(os.path.dirname(__file__), 'static'),
'template_path':os.path.join(os.path.dirname(__file__), 'templates'),
"xsrf_cookies": True,
"cookie_secret": "61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
"login_url": "/login",
'debug': True,
}
# DB_CONNECT_STRING = 'mysql+mysqldb://root:root@localhost/v2exfriends?charset=utf8'
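# Shared Redis client: a single connection pool against the local instance, logical db 1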
import redis
pool = redis.ConnectionPool(host='localhost', port=6379, db=1)
RD = redis.Redis(connection_pool=pool)
#### libs settings
from libs.github import *
from libs.reddit import *
from libs.stackoverflow import *
GITHUB_TOKEN = '05aa6e1541ecdb53f473b8e32f2a4e45b1ea0a27'
gh = GitHub(GITHUB_TOKEN)
reddit = Reddit()
sof = Sof()
#celery settings
CELERYD_POOL_RESTARTS = True
CELERYD_FORCE_EXECV = True
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
'users_chain_3600': {
'task': 'tasks.users_chain',
'schedule': timedelta(seconds=3620),
},
}
#### dev environment or deploy environment
import socket
if socket.gethostname() == 'jqh-virtual-machine' or socket.gethostname() == 'no13busdeMacBook-Air.local' or socket.gethostname() == 'localhost':
try:
from settings_dev import *
except ImportError:
pass
| mit | -1,263,488,909,710,361,000 | 24.086207 | 144 | 0.691197 | false |
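The file above is a conventional Tornado settings module. As a rough sketch (not part of the repository), such a dict is normally unpacked into `tornado.web.Application`; the handler, the module name `app.py` and the port below are made up for illustration.

```python
# Hypothetical app.py, assuming the settings module above is importable
import tornado.ioloop
import tornado.web

from settings import settings  # the dict defined in the file above


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello")


# Options such as xsrf_cookies, login_url and debug are picked up by Tornado
# from the unpacked settings dict
application = tornado.web.Application([(r"/", MainHandler)], **settings)

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
```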
| SGenheden/Scripts | Projects/Gpcr/gpcr_anal_lifetimes.py | 1 | 5680 |
# Author: Samuel Genheden [email protected]
"""
Program to analyse state files in order to compute lifetimes of molecules or other
states.
The state series can be files on disc or logical combinations of already open state
series. The program will analyse groups of files, each group with a number of repeats.
Median and maximum lifetimes will be written to standard output, as well as the
average and standard error in the case of multiple repeats.
Examples
--------
gpcr_anal_lifetimes.py -f r1_md3_en_fit_chol.mstate.6.dat r1_md3_en_fit_chol.mid.dat 1a2
r1_md3_en_fit_chol.resstate.6.dat
-l chol/mol chol/bur chol-on/bur chol/hlx --helical 4 --mol b2
will read cholesterol in contact and buried cholesterol, and combine it to also analyse
buried cholesterols in contact. The fourth file exemplifies the helical lifetime analysis.
"""
import os
import sys
import argparse
import numpy as np
import pycontacts
import gpcr_lib
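# Continuation-line separator: newline plus 18 spaces, so per-helix values line up
# under the 18-character label column printed below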
SEP = "\n"+" "*18
def _make_helical(state,mol) :
"""
Convert a residue state matrix to a
helical state matrix by looking at the residues
in the helices.
Parameters
----------
state : nxR Numpy array
the state matrix,
n is the number of snapshots
R is the number of residues
mol : string
the identifier of the molecule
Returns
-------
nxH Numpy array
the helical state matrix, H is the number of helices
"""
helices = gpcr_lib.load_template(mol).rhelices
hstate = np.zeros([state.shape[0],len(helices)],dtype=np.uint8)
for i,h in enumerate(helices) :
hstate[:,i] = np.any(state[:,h[0]-1:h[1]],axis=1)
return hstate
def _anal_lifetimes(files,labels,helical,mol,time) :
"""
Main work routine to analyse life times
Parameters
----------
files : list of string
the state files to analyse
labels : list of string
a label for each state file
helical : list of integers
indicator to do helical transformation for some files
mol : string
identifier of molecule
time : float
the total simulation time
Returns
-------
list of Numpy arrays
the results
"""
# Read in each state file or do a logical transformation
states = []
for filename,label in zip(files,labels) :
if os.path.isfile(filename) :
states.append(gpcr_lib.read_statefile(filename))
else :
states.append(gpcr_lib.logical_expr(filename.replace(" ",""),*states))
# Conversion factor from snapshot lifetime to ns lifetime
ns_per_snapshot = time / float(states[0].shape[0])
# Perform helical transformation
if helical is not None :
for h in helical :
states[h-1] = _make_helical(states[h-1],mol)
# Concatenate all state matrices and do lifetime analysis
all_states = np.concatenate(states,axis=1)
life_av,life_max = pycontacts.lifetime(all_states)
# Write out statistics
print "%15s\t%8s\t%8s"%("","Median","Max")
nused = 0
results = []
for i,(state,label) in enumerate(zip(states,labels),1) :
life_av_s = life_av[nused:nused+state.shape[1]]*ns_per_snapshot
life_max_s = life_max[nused:nused+state.shape[1]]*ns_per_snapshot
nused = nused+state.shape[1]
if helical is not None and i in helical :
print "%-18s%s"%(label,SEP.join("\t%8.3f\t%8.3f"%(a,m) for (a,m) in zip(life_av_s,life_max_s)))
results.append(np.concatenate([life_av_s,life_max_s]))
else :
print "%-18s\t%8.3f\t%8.3f"%(label,np.median(life_av_s),life_max_s.mean())
results.append(np.array([np.median(life_av_s),life_max_s.mean()]))
return results
if __name__ == '__main__' :
# Setup a parser of the command-line arguments
parser = argparse.ArgumentParser(description="Analysing lifetimes from state files")
parser.add_argument('-f','--files',nargs='+',help="a list of input files.",default=[])
parser.add_argument('-l','--labels',nargs='+',help="a label for each state.",default=[])
parser.add_argument('--helical',nargs='+',type=int,help="flag to perform helical transformation for a state file")
parser.add_argument('--mol',choices=["b2","a2a","b2_a","a2a_a"],help="the protein molecules, should be either 'b2' or 'a2a'",default="b2")
parser.add_argument('--time',type=float,help="total simulation time in ns",default=50000)
parser.add_argument('--repeats',nargs="+",help="replacement pattern for multiple repeats",default=["r1_","r2_","r3_","r4_","r5_"])
args = parser.parse_args()
res0 = _anal_lifetimes(args.files,args.labels,args.helical,args.mol,args.time)
if args.repeats is not None :
# Allocate arrays for total statistics
results = []
for r0 in res0 :
results.append(np.zeros([r0.shape[0],len(args.repeats)]))
results[-1][:,0] = r0
for ri,r in enumerate(args.repeats[1:],1) :
files2 = [f.replace(args.repeats[0],r) for f in args.files]
print ""
res = _anal_lifetimes(files2,args.labels,args.helical,args.mol,args.time)
# Accumulate to the total statistics
for i,r in enumerate(res) :
results[i][:,ri] = r
# Write out statistics over repeats
print "\n%15s\t%8s\t%8s"%("","Tot-Med.","Tot-Max")
for i,(label,r) in enumerate(zip(args.labels,results),1) :
rav = r.mean(axis=1)
rstd = r.std(axis=1)/np.sqrt(r.shape[1])
if args.helical is not None and i in args.helical :
l2 = rav.shape[0] / 2
print "%-18s%s"%(label,SEP.join("\t%8.3f\t%8.3f\t%8.3f\t%8.3f"%(a,au,m,mu) for (a,m,au,mu) in zip(rav[:l2],rav[l2:],rstd[:l2],rstd[l2:])))
else :
print "%-18s\t%8.3f\t%8.3f\t%8.3f\t%8.3f"%(label,rav[0],rstd[0],rav[1],rstd[1]) | mit | 6,737,768,644,979,249,000 | 34.72956 | 147 | 0.654401 | false |
| xeBuz/Ordbogen | app/models/base.py | 1 | 1276 |
from abc import abstractmethod
from app import db
class BaseModel(db.Model):
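    # __abstract__ keeps SQLAlchemy from creating a table for this base class;
    # only the concrete subclasses are mapped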
__abstract__ = True
def __init__(self, **kwargs):
"""
Initialize the BaseModel. This method receives an arbitrary set of key=value arguments.
From those, only the keys matching a column name of the table are used.
:param kwargs:
"""
columns = [m.key for m in self.__table__.columns]
for key in kwargs.keys():
if key in columns:
self.__setattr__(key, kwargs[key])
@abstractmethod
def __repr__(self):
return
@staticmethod
def required_fields():
"""
Required fields
:return:
"""
return None
@staticmethod
def model_fields():
"""
List of usable fields from the model, since not all fields should be modified
:return:
"""
return None
def save(self):
"""
Add the object to the transaction and commit the session
"""
db.session.add(self)
db.session.commit()
def delete(self):
"""
Remove the object from the transaction and commit the session
"""
db.session.delete(self)
db.session.commit()
| mpl-2.0 | -1,038,764,262,403,710,300 | 22.2 | 97 | 0.558777 | false |
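A minimal usage sketch (hypothetical, not part of the repository) of the BaseModel class above, assuming the same Flask-SQLAlchemy `db` object and a running application context; the `User` model, its columns and the import path are invented for illustration.

```python
from app import db
from app.models.base import BaseModel  # the class defined above


class User(BaseModel):
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))

    def __repr__(self):
        return '<User %r>' % self.name


# BaseModel.__init__ only assigns keyword arguments whose names match a column,
# so the invented 'ignored' key is silently dropped
user = User(name='alice', ignored='dropped')
user.save()  # adds the object to the session and commits
```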
| phoebusliang/parallel-lettuce | tests/functional/test_runner.py | 1 | 74405 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import random
import lettuce
from mock import Mock, patch
from sure import expect
from StringIO import StringIO
from os.path import dirname, join, abspath
from nose.tools import assert_equals, with_setup, assert_raises
from lettuce.fs import FeatureLoader
from lettuce.core import Feature, fs, StepDefinition
from lettuce.terrain import world
from lettuce import Runner
from tests.asserts import assert_lines
from tests.asserts import prepare_stderr
from tests.asserts import prepare_stdout
from tests.asserts import assert_stderr_lines
from tests.asserts import assert_stdout_lines
from tests.asserts import assert_stderr_lines_with_traceback
from tests.asserts import assert_stdout_lines_with_traceback
current_dir = abspath(dirname(__file__))
lettuce_dir = abspath(dirname(lettuce.__file__))
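# Path helpers for the feature fixtures used throughout these tests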
ojoin = lambda *x: join(current_dir, 'output_features', *x)
sjoin = lambda *x: join(current_dir, 'syntax_features', *x)
tjoin = lambda *x: join(current_dir, 'tag_features', *x)
bjoin = lambda *x: join(current_dir, 'bg_features', *x)
lettuce_path = lambda *x: fs.relpath(join(lettuce_dir, *x))
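# Line number inside StepDefinition.__call__ where the step function is invoked;
# the expected tracebacks below interpolate this value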
call_line = StepDefinition.__call__.im_func.func_code.co_firstlineno + 5
def joiner(callback, name):
return callback(name, "%s.feature" % name)
feature_name = lambda name: joiner(ojoin, name)
syntax_feature_name = lambda name: joiner(sjoin, name)
tag_feature_name = lambda name: joiner(tjoin, name)
bg_feature_name = lambda name: joiner(bjoin, name)
@with_setup(prepare_stderr)
def test_try_to_import_terrain():
"Runner tries to import terrain, but has a nice output when it fail"
sandbox_path = ojoin('..', 'sandbox')
original_path = abspath(".")
os.chdir(sandbox_path)
try:
import lettuce
reload(lettuce)
raise AssertionError('The runner should raise ImportError !')
except SystemExit:
assert_stderr_lines_with_traceback(
'Lettuce has tried to load the conventional environment module '
'"terrain"\nbut it has errors, check its contents and '
'try to run lettuce again.\n\nOriginal traceback below:\n\n'
"Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line 44, in <module>\n'
' terrain = fs.FileSystem._import("terrain")\n'
' File "%(lettuce_fs_file)s", line 63, in _import\n'
' module = imp.load_module(name, fp, pathname, description)\n'
' File "%(terrain_file)s", line 18\n'
' it is here just to cause a syntax error\n'
" ^\n"
'SyntaxError: invalid syntax\n' % {
'lettuce_core_file': abspath(join(lettuce_dir, '__init__.py')),
'lettuce_fs_file': abspath(join(lettuce_dir, 'fs.py')),
'terrain_file': abspath(lettuce_path('..', 'tests', 'functional', 'sandbox', 'terrain.py')),
}
)
finally:
os.chdir(original_path)
def test_feature_representation_without_colors():
"Feature represented without colors"
feature_file = ojoin('..', 'simple_features', '1st_feature_dir', 'some.feature')
feature = Feature.from_file(feature_file)
assert_lines(
feature.represented(),
"Feature: Addition # tests/functional/simple_features/1st_feature_dir/some.feature:5\n"
" In order to avoid silly mistakes # tests/functional/simple_features/1st_feature_dir/some.feature:6\n"
" As a math idiot # tests/functional/simple_features/1st_feature_dir/some.feature:7\n"
" I want to be told the sum of two numbers # tests/functional/simple_features/1st_feature_dir/some.feature:8\n"
)
def test_scenario_outline_representation_without_colors():
"Scenario Outline represented without colors"
feature_file = ojoin('..', 'simple_features', '1st_feature_dir', 'some.feature')
feature = Feature.from_file(feature_file)
assert_equals(
feature.scenarios[0].represented(),
" Scenario Outline: Add two numbers # tests/functional/simple_features/1st_feature_dir/some.feature:10\n"
)
def test_scenario_representation_without_colors():
"Scenario represented without colors"
feature_file = ojoin('runner_features', 'first.feature')
feature = Feature.from_file(feature_file)
assert_equals(
feature.scenarios[0].represented(),
" Scenario: Do nothing # tests/functional/output_features/runner_features/first.feature:6\n"
)
def test_undefined_step_represent_string():
"Undefined step represented without colors"
feature_file = ojoin('runner_features', 'first.feature')
feature = Feature.from_file(feature_file)
step = feature.scenarios[0].steps[0]
assert_equals(
step.represent_string(step.sentence),
" Given I do nothing # tests/functional/output_features/runner_features/first.feature:7\n"
)
assert_equals(
step.represent_string("foo bar"),
" foo bar # tests/functional/output_features/runner_features/first.feature:7\n"
)
def test_defined_step_represent_string():
"Defined step represented without colors"
feature_file = ojoin('runner_features', 'first.feature')
feature_dir = ojoin('runner_features')
loader = FeatureLoader(feature_dir)
world._output = StringIO()
world._is_colored = False
loader.find_and_load_step_definitions()
feature = Feature.from_file(feature_file)
step = feature.scenarios[0].steps[0]
step.run(True)
assert_equals(
step.represent_string(step.sentence),
" Given I do nothing # tests/functional/output_features/runner_features/dumb_steps.py:6\n"
)
@with_setup(prepare_stdout)
def test_output_with_success_colorless2():
"Testing the colorless output of a successful feature"
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'runner_features'), verbosity=3)
runner.run()
assert_stdout_lines(
"\n"
"Feature: Dumb feature # tests/functional/output_features/runner_features/first.feature:1\n"
" In order to test success # tests/functional/output_features/runner_features/first.feature:2\n"
" As a programmer # tests/functional/output_features/runner_features/first.feature:3\n"
" I want to see that the output is green # tests/functional/output_features/runner_features/first.feature:4\n"
"\n"
" Scenario: Do nothing # tests/functional/output_features/runner_features/first.feature:6\n"
" Given I do nothing # tests/functional/output_features/runner_features/dumb_steps.py:6\n"
"\n"
"1 feature (1 passed)\n"
"1 scenario (1 passed)\n"
"1 step (1 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_with_success_colorless():
"A feature with two scenarios should separate the two scenarios with a new line (in colorless mode)."
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'many_successful_scenarios'), verbosity=3)
runner.run()
assert_stdout_lines(
"\n"
"Feature: Dumb feature # tests/functional/output_features/many_successful_scenarios/first.feature:1\n"
" In order to test success # tests/functional/output_features/many_successful_scenarios/first.feature:2\n"
" As a programmer # tests/functional/output_features/many_successful_scenarios/first.feature:3\n"
" I want to see that the output is green # tests/functional/output_features/many_successful_scenarios/first.feature:4\n"
"\n"
" Scenario: Do nothing # tests/functional/output_features/many_successful_scenarios/first.feature:6\n"
" Given I do nothing # tests/functional/output_features/many_successful_scenarios/dumb_steps.py:6\n"
"\n"
" Scenario: Do nothing (again) # tests/functional/output_features/many_successful_scenarios/first.feature:9\n"
" Given I do nothing (again) # tests/functional/output_features/many_successful_scenarios/dumb_steps.py:6\n"
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"2 steps (2 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_with_success_colorful():
"Testing the output of a successful feature"
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'runner_features'), verbosity=4)
runner.run()
assert_stdout_lines(
"\n"
"\033[1;37mFeature: Dumb feature \033[1;30m# tests/functional/output_features/runner_features/first.feature:1\033[0m\n"
"\033[1;37m In order to test success \033[1;30m# tests/functional/output_features/runner_features/first.feature:2\033[0m\n"
"\033[1;37m As a programmer \033[1;30m# tests/functional/output_features/runner_features/first.feature:3\033[0m\n"
"\033[1;37m I want to see that the output is green \033[1;30m# tests/functional/output_features/runner_features/first.feature:4\033[0m\n"
"\n"
"\033[1;37m Scenario: Do nothing \033[1;30m# tests/functional/output_features/runner_features/first.feature:6\033[0m\n"
"\033[1;30m Given I do nothing \033[1;30m# tests/functional/output_features/runner_features/dumb_steps.py:6\033[0m\n"
"\033[A\033[1;32m Given I do nothing \033[1;30m# tests/functional/output_features/runner_features/dumb_steps.py:6\033[0m\n"
"\n"
"\033[1;37m1 feature (\033[1;32m1 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 scenario (\033[1;32m1 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 step (\033[1;32m1 passed\033[1;37m)\033[0m\n"
)
@with_setup(prepare_stdout)
def test_output_with_success_colorful_newline():
"A feature with two scenarios should separate the two scenarios with a new line (in color mode)."
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'many_successful_scenarios'), verbosity=4)
runner.run()
assert_stdout_lines(
"\n"
"\033[1;37mFeature: Dumb feature \033[1;30m# tests/functional/output_features/many_successful_scenarios/first.feature:1\033[0m\n"
"\033[1;37m In order to test success \033[1;30m# tests/functional/output_features/many_successful_scenarios/first.feature:2\033[0m\n"
"\033[1;37m As a programmer \033[1;30m# tests/functional/output_features/many_successful_scenarios/first.feature:3\033[0m\n"
"\033[1;37m I want to see that the output is green \033[1;30m# tests/functional/output_features/many_successful_scenarios/first.feature:4\033[0m\n"
"\n"
"\033[1;37m Scenario: Do nothing \033[1;30m# tests/functional/output_features/many_successful_scenarios/first.feature:6\033[0m\n"
"\033[1;30m Given I do nothing \033[1;30m# tests/functional/output_features/many_successful_scenarios/dumb_steps.py:6\033[0m\n"
"\033[A\033[1;32m Given I do nothing \033[1;30m# tests/functional/output_features/many_successful_scenarios/dumb_steps.py:6\033[0m\n"
"\n"
"\033[1;37m Scenario: Do nothing (again) \033[1;30m# tests/functional/output_features/many_successful_scenarios/first.feature:9\033[0m\n"
"\033[1;30m Given I do nothing (again) \033[1;30m# tests/functional/output_features/many_successful_scenarios/dumb_steps.py:6\033[0m\n"
"\033[A\033[1;32m Given I do nothing (again) \033[1;30m# tests/functional/output_features/many_successful_scenarios/dumb_steps.py:6\033[0m\n"
"\n"
"\033[1;37m1 feature (\033[1;32m1 passed\033[1;37m)\033[0m\n"
"\033[1;37m2 scenarios (\033[1;32m2 passed\033[1;37m)\033[0m\n"
"\033[1;37m2 steps (\033[1;32m2 passed\033[1;37m)\033[0m\n"
)
@with_setup(prepare_stdout)
def test_output_with_success_colorless_many_features():
"Testing the output of many successful features"
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'many_successful_features'), verbosity=3)
runner.run()
assert_stdout_lines(
"\n"
"Feature: First feature, of many # tests/functional/output_features/many_successful_features/one.feature:1\n"
" In order to make lettuce more robust # tests/functional/output_features/many_successful_features/one.feature:2\n"
" As a programmer # tests/functional/output_features/many_successful_features/one.feature:3\n"
" I want to test its output on many features # tests/functional/output_features/many_successful_features/one.feature:4\n"
"\n"
" Scenario: Do nothing # tests/functional/output_features/many_successful_features/one.feature:6\n"
" Given I do nothing # tests/functional/output_features/many_successful_features/dumb_steps.py:6\n"
" Then I see that the test passes # tests/functional/output_features/many_successful_features/dumb_steps.py:8\n"
"\n"
"Feature: Second feature, of many # tests/functional/output_features/many_successful_features/two.feature:1\n"
" I just want to see it green :) # tests/functional/output_features/many_successful_features/two.feature:2\n"
"\n"
" Scenario: Do nothing # tests/functional/output_features/many_successful_features/two.feature:4\n"
" Given I do nothing # tests/functional/output_features/many_successful_features/dumb_steps.py:6\n"
" Then I see that the test passes # tests/functional/output_features/many_successful_features/dumb_steps.py:8\n"
"\n"
"2 features (2 passed)\n"
"2 scenarios (2 passed)\n"
"4 steps (4 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_with_success_colorful_many_features():
"Testing the colorful output of many successful features"
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'many_successful_features'), verbosity=4)
runner.run()
assert_stdout_lines(
"\n"
"\033[1;37mFeature: First feature, of many \033[1;30m# tests/functional/output_features/many_successful_features/one.feature:1\033[0m\n"
"\033[1;37m In order to make lettuce more robust \033[1;30m# tests/functional/output_features/many_successful_features/one.feature:2\033[0m\n"
"\033[1;37m As a programmer \033[1;30m# tests/functional/output_features/many_successful_features/one.feature:3\033[0m\n"
"\033[1;37m I want to test its output on many features \033[1;30m# tests/functional/output_features/many_successful_features/one.feature:4\033[0m\n"
"\n"
"\033[1;37m Scenario: Do nothing \033[1;30m# tests/functional/output_features/many_successful_features/one.feature:6\033[0m\n"
"\033[1;30m Given I do nothing \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:6\033[0m\n"
"\033[A\033[1;32m Given I do nothing \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:6\033[0m\n"
"\033[1;30m Then I see that the test passes \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:8\033[0m\n"
"\033[A\033[1;32m Then I see that the test passes \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:8\033[0m\n"
"\n"
"\033[1;37mFeature: Second feature, of many \033[1;30m# tests/functional/output_features/many_successful_features/two.feature:1\033[0m\n"
"\033[1;37m I just want to see it green :) \033[1;30m# tests/functional/output_features/many_successful_features/two.feature:2\033[0m\n"
"\n"
"\033[1;37m Scenario: Do nothing \033[1;30m# tests/functional/output_features/many_successful_features/two.feature:4\033[0m\n"
"\033[1;30m Given I do nothing \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:6\033[0m\n"
"\033[A\033[1;32m Given I do nothing \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:6\033[0m\n"
"\033[1;30m Then I see that the test passes \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:8\033[0m\n"
"\033[A\033[1;32m Then I see that the test passes \033[1;30m# tests/functional/output_features/many_successful_features/dumb_steps.py:8\033[0m\n"
"\n"
"\033[1;37m2 features (\033[1;32m2 passed\033[1;37m)\033[0m\n"
"\033[1;37m2 scenarios (\033[1;32m2 passed\033[1;37m)\033[0m\n"
"\033[1;37m4 steps (\033[1;32m4 passed\033[1;37m)\033[0m\n"
)
@with_setup(prepare_stdout)
def test_output_when_could_not_find_features():
"Testing the colorful output of many successful features"
path = fs.relpath(join(abspath(dirname(__file__)), 'no_features', 'unexistent-folder'))
runner = Runner(path, verbosity=4)
runner.run()
assert_stdout_lines(
'\033[1;31mOops!\033[0m\n'
'\033[1;37mcould not find features at \033[1;33m./%s\033[0m\n' % path
)
@with_setup(prepare_stdout)
def test_output_when_could_not_find_features_colorless():
"Testing the colorful output of many successful features colorless"
path = fs.relpath(join(abspath(dirname(__file__)), 'no_features', 'unexistent-folder'))
runner = Runner(path, verbosity=3)
runner.run()
assert_stdout_lines(
'Oops!\n'
'could not find features at ./%s\n' % path
)
@with_setup(prepare_stdout)
def test_output_when_could_not_find_features_verbosity_level_2():
"Testing the colorful output of many successful features colorless"
path = fs.relpath(join(abspath(dirname(__file__)), 'no_features', 'unexistent-folder'))
runner = Runner(path, verbosity=2)
runner.run()
assert_stdout_lines(
'Oops!\n'
'could not find features at ./%s\n' % path
)
@with_setup(prepare_stdout)
def test_output_with_success_colorless_with_table():
"Testing the colorless output of success with table"
runner = Runner(feature_name('success_table'), verbosity=3)
runner.run()
assert_stdout_lines(
'\n'
'Feature: Table Success # tests/functional/output_features/success_table/success_table.feature:1\n'
'\n'
' Scenario: Add two numbers ♥ # tests/functional/output_features/success_table/success_table.feature:2\n'
' Given I have 0 bucks # tests/functional/output_features/success_table/success_table_steps.py:28\n'
' And that I have these items: # tests/functional/output_features/success_table/success_table_steps.py:32\n'
' | name | price |\n'
' | Porsche | 200000 |\n'
' | Ferrari | 400000 |\n'
' When I sell the "Ferrari" # tests/functional/output_features/success_table/success_table_steps.py:42\n'
' Then I have 400000 bucks # tests/functional/output_features/success_table/success_table_steps.py:28\n'
' And my garage contains: # tests/functional/output_features/success_table/success_table_steps.py:47\n'
' | name | price |\n'
' | Porsche | 200000 |\n'
'\n'
'1 feature (1 passed)\n'
'1 scenario (1 passed)\n'
'5 steps (5 passed)\n'
)
@with_setup(prepare_stdout)
def test_output_with_success_colorful_with_table():
"Testing the colorful output of success with table"
runner = Runner(feature_name('success_table'), verbosity=4)
runner.run()
assert_stdout_lines(
'\n'
'\033[1;37mFeature: Table Success \033[1;30m# tests/functional/output_features/success_table/success_table.feature:1\033[0m\n'
'\n'
'\033[1;37m Scenario: Add two numbers ♥ \033[1;30m# tests/functional/output_features/success_table/success_table.feature:2\033[0m\n'
'\033[1;30m Given I have 0 bucks \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:28\033[0m\n'
'\033[A\033[1;32m Given I have 0 bucks \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:28\033[0m\n'
'\033[1;30m And that I have these items: \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:32\033[0m\n'
'\033[1;30m \033[1;37m |\033[1;30m name \033[1;37m |\033[1;30m price \033[1;37m |\033[1;30m\033[0m\n'
'\033[1;30m \033[1;37m |\033[1;30m Porsche\033[1;37m |\033[1;30m 200000\033[1;37m |\033[1;30m\033[0m\n'
'\033[1;30m \033[1;37m |\033[1;30m Ferrari\033[1;37m |\033[1;30m 400000\033[1;37m |\033[1;30m\033[0m\n'
'\033[A\033[A\033[A\033[A\033[1;32m And that I have these items: \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:32\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m name \033[1;37m |\033[1;32m price \033[1;37m |\033[1;32m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m Porsche\033[1;37m |\033[1;32m 200000\033[1;37m |\033[1;32m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m Ferrari\033[1;37m |\033[1;32m 400000\033[1;37m |\033[1;32m\033[0m\n'
'\033[1;30m When I sell the "Ferrari" \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:42\033[0m\n'
'\033[A\033[1;32m When I sell the "Ferrari" \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:42\033[0m\n'
'\033[1;30m Then I have 400000 bucks \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:28\033[0m\n'
'\033[A\033[1;32m Then I have 400000 bucks \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:28\033[0m\n'
'\033[1;30m And my garage contains: \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:47\033[0m\n'
'\033[1;30m \033[1;37m |\033[1;30m name \033[1;37m |\033[1;30m price \033[1;37m |\033[1;30m\033[0m\n'
'\033[1;30m \033[1;37m |\033[1;30m Porsche\033[1;37m |\033[1;30m 200000\033[1;37m |\033[1;30m\033[0m\n'
'\033[A\033[A\033[A\033[1;32m And my garage contains: \033[1;30m# tests/functional/output_features/success_table/success_table_steps.py:47\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m name \033[1;37m |\033[1;32m price \033[1;37m |\033[1;32m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m Porsche\033[1;37m |\033[1;32m 200000\033[1;37m |\033[1;32m\033[0m\n'
'\n'
"\033[1;37m1 feature (\033[1;32m1 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 scenario (\033[1;32m1 passed\033[1;37m)\033[0m\n"
"\033[1;37m5 steps (\033[1;32m5 passed\033[1;37m)\033[0m\n"
)
@with_setup(prepare_stdout)
def test_output_with_failed_colorless_with_table():
"Testing the colorless output of failed with table"
runner = Runner(feature_name('failed_table'), verbosity=3)
runner.run()
assert_stdout_lines_with_traceback(
("\n"
"Feature: Table Fail # tests/functional/output_features/failed_table/failed_table.feature:1\n"
"\n"
" Scenario: See it fail # tests/functional/output_features/failed_table/failed_table.feature:2\n"
u" Given I have a dumb step that passes ♥ # tests/functional/output_features/failed_table/failed_table_steps.py:20\n"
" And this one fails # tests/functional/output_features/failed_table/failed_table_steps.py:24\n"
" Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 25, in tof\n'
" assert False\n"
" AssertionError\n"
" Then this one will be skipped # tests/functional/output_features/failed_table/failed_table_steps.py:28\n"
" And this one will be skipped # tests/functional/output_features/failed_table/failed_table_steps.py:28\n"
" And this one does not even has definition # tests/functional/output_features/failed_table/failed_table.feature:12 (undefined)\n"
"\n"
"1 feature (0 passed)\n"
"1 scenario (0 passed)\n"
"5 steps (1 failed, 2 skipped, 1 undefined, 1 passed)\n"
"\n"
"You can implement step definitions for undefined steps with these snippets:\n"
"\n"
"# -*- coding: utf-8 -*-\n"
"from lettuce import step\n"
"\n"
"@step(u'And this one does not even has definition')\n"
"def and_this_one_does_not_even_has_definition(step):\n"
" assert False, 'This step must be implemented'\n"
"\n"
"List of failed scenarios:\n"
" Scenario: See it fail # tests/functional/output_features/failed_table/failed_table.feature:2\n"
"\n") % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'failed_table', 'failed_table_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_output_with_failed_colorful_with_table():
"Testing the colorful output of failed with table"
runner = Runner(feature_name('failed_table'), verbosity=4)
runner.run()
assert_stdout_lines_with_traceback(
"\n"
"\033[1;37mFeature: Table Fail \033[1;30m# tests/functional/output_features/failed_table/failed_table.feature:1\033[0m\n"
"\n"
"\033[1;37m Scenario: See it fail \033[1;30m# tests/functional/output_features/failed_table/failed_table.feature:2\033[0m\n"
u"\033[1;30m Given I have a dumb step that passes ♥ \033[1;30m# tests/functional/output_features/failed_table/failed_table_steps.py:20\033[0m\n"
u"\033[A\033[1;32m Given I have a dumb step that passes ♥ \033[1;30m# tests/functional/output_features/failed_table/failed_table_steps.py:20\033[0m\n"
"\033[1;30m And this one fails \033[1;30m# tests/functional/output_features/failed_table/failed_table_steps.py:24\033[0m\n"
"\033[A\033[0;31m And this one fails \033[1;41;33m# tests/functional/output_features/failed_table/failed_table_steps.py:24\033[0m\n"
"\033[1;31m Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 25, in tof\n'
" assert False\n"
" AssertionError\033[0m\n"
"\033[1;30m Then this one will be skipped \033[1;30m# tests/functional/output_features/failed_table/failed_table_steps.py:28\033[0m\n"
"\033[A\033[0;36m Then this one will be skipped \033[1;30m# tests/functional/output_features/failed_table/failed_table_steps.py:28\033[0m\n"
"\033[1;30m And this one will be skipped \033[1;30m# tests/functional/output_features/failed_table/failed_table_steps.py:28\033[0m\n"
"\033[A\033[0;36m And this one will be skipped \033[1;30m# tests/functional/output_features/failed_table/failed_table_steps.py:28\033[0m\n"
"\033[0;33m And this one does not even has definition \033[1;30m# tests/functional/output_features/failed_table/failed_table.feature:12\033[0m\n"
"\n"
"\033[1;37m1 feature (\033[0;31m0 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 scenario (\033[0;31m0 passed\033[1;37m)\033[0m\n"
"\033[1;37m5 steps (\033[0;31m1 failed\033[1;37m, \033[0;36m2 skipped\033[1;37m, \033[0;33m1 undefined\033[1;37m, \033[1;32m1 passed\033[1;37m)\033[0m\n"
"\n"
"\033[0;33mYou can implement step definitions for undefined steps with these snippets:\n"
"\n"
"# -*- coding: utf-8 -*-\n"
"from lettuce import step\n"
"\n"
"@step(u'And this one does not even has definition')\n"
"def and_this_one_does_not_even_has_definition(step):\n"
" assert False, 'This step must be implemented'\033[0m"
"\n"
"\n"
"\033[1;31mList of failed scenarios:\n"
"\033[0;31m Scenario: See it fail # tests/functional/output_features/failed_table/failed_table.feature:2\n"
"\033[0m\n" % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'failed_table', 'failed_table_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_output_with_successful_outline_colorless():
"With colorless output, a successful outline scenario should print beautifully."
runner = Runner(feature_name('success_outline'), verbosity=3)
runner.run()
assert_stdout_lines(
'\n'
'Feature: Successful Scenario Outline # tests/functional/output_features/success_outline/success_outline.feature:1\n'
' As lettuce author # tests/functional/output_features/success_outline/success_outline.feature:2\n'
' In order to finish the first release # tests/functional/output_features/success_outline/success_outline.feature:3\n'
u' I want to make scenario outlines work ♥ # tests/functional/output_features/success_outline/success_outline.feature:4\n'
'\n'
' Scenario Outline: fill a web form # tests/functional/output_features/success_outline/success_outline.feature:6\n'
' Given I open browser at "http://www.my-website.com/" # tests/functional/output_features/success_outline/success_outline_steps.py:21\n'
' And click on "sign-up" # tests/functional/output_features/success_outline/success_outline_steps.py:25\n'
' When I fill the field "username" with "<username>" # tests/functional/output_features/success_outline/success_outline_steps.py:29\n'
' And I fill the field "password" with "<password>" # tests/functional/output_features/success_outline/success_outline_steps.py:29\n'
' And I fill the field "password-confirm" with "<password>" # tests/functional/output_features/success_outline/success_outline_steps.py:29\n'
' And I fill the field "email" with "<email>" # tests/functional/output_features/success_outline/success_outline_steps.py:29\n'
' And I click "done" # tests/functional/output_features/success_outline/success_outline_steps.py:33\n'
' Then I see the title of the page is "<title>" # tests/functional/output_features/success_outline/success_outline_steps.py:37\n'
'\n'
' Examples:\n'
' | username | password | email | title |\n'
' | john | doe-1234 | [email protected] | John \| My Website |\n'
' | mary | wee-9876 | [email protected] | Mary \| My Website |\n'
' | foo | foo-bar | [email protected] | Foo \| My Website |\n'
'\n'
'1 feature (1 passed)\n'
'3 scenarios (3 passed)\n'
'24 steps (24 passed)\n'
)
@with_setup(prepare_stdout)
def test_output_with_successful_outline_colorful():
"With colored output, a successful outline scenario should print beautifully."
runner = Runner(feature_name('success_outline'), verbosity=4)
runner.run()
assert_stdout_lines_with_traceback(
'\n'
'\033[1;37mFeature: Successful Scenario Outline \033[1;30m# tests/functional/output_features/success_outline/success_outline.feature:1\033[0m\n'
'\033[1;37m As lettuce author \033[1;30m# tests/functional/output_features/success_outline/success_outline.feature:2\033[0m\n'
'\033[1;37m In order to finish the first release \033[1;30m# tests/functional/output_features/success_outline/success_outline.feature:3\033[0m\n'
u'\033[1;37m I want to make scenario outlines work ♥ \033[1;30m# tests/functional/output_features/success_outline/success_outline.feature:4\033[0m\n'
'\n'
'\033[1;37m Scenario Outline: fill a web form \033[1;30m# tests/functional/output_features/success_outline/success_outline.feature:6\033[0m\n'
'\033[0;36m Given I open browser at "http://www.my-website.com/" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:21\033[0m\n'
'\033[0;36m And click on "sign-up" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:25\033[0m\n'
'\033[0;36m When I fill the field "username" with "<username>" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:29\033[0m\n'
'\033[0;36m And I fill the field "password" with "<password>" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:29\033[0m\n'
'\033[0;36m And I fill the field "password-confirm" with "<password>" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:29\033[0m\n'
'\033[0;36m And I fill the field "email" with "<email>" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:29\033[0m\n'
'\033[0;36m And I click "done" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:33\033[0m\n'
'\033[0;36m Then I see the title of the page is "<title>" \033[1;30m# tests/functional/output_features/success_outline/success_outline_steps.py:37\033[0m\n'
'\n'
'\033[1;37m Examples:\033[0m\n'
'\033[0;36m \033[1;37m |\033[0;36m username\033[1;37m |\033[0;36m password\033[1;37m |\033[0;36m email \033[1;37m |\033[0;36m title \033[1;37m |\033[0;36m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m john \033[1;37m |\033[1;32m doe-1234\033[1;37m |\033[1;32m [email protected]\033[1;37m |\033[1;32m John \| My Website\033[1;37m |\033[1;32m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m mary \033[1;37m |\033[1;32m wee-9876\033[1;37m |\033[1;32m [email protected]\033[1;37m |\033[1;32m Mary \| My Website\033[1;37m |\033[1;32m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m foo \033[1;37m |\033[1;32m foo-bar \033[1;37m |\033[1;32m [email protected] \033[1;37m |\033[1;32m Foo \| My Website \033[1;37m |\033[1;32m\033[0m\n'
'\n'
"\033[1;37m1 feature (\033[1;32m1 passed\033[1;37m)\033[0m\n"
"\033[1;37m3 scenarios (\033[1;32m3 passed\033[1;37m)\033[0m\n"
"\033[1;37m24 steps (\033[1;32m24 passed\033[1;37m)\033[0m\n"
)
@with_setup(prepare_stdout)
def test_output_with_failful_outline_colorless():
"With colorless output, an unsuccessful outline scenario should print beautifully."
runner = Runner(feature_name('fail_outline'), verbosity=3)
runner.run()
assert_stdout_lines_with_traceback(
'\n'
'Feature: Failful Scenario Outline # tests/functional/output_features/fail_outline/fail_outline.feature:1\n'
' As lettuce author # tests/functional/output_features/fail_outline/fail_outline.feature:2\n'
' In order to finish the first release # tests/functional/output_features/fail_outline/fail_outline.feature:3\n'
u' I want to make scenario outlines work ♥ # tests/functional/output_features/fail_outline/fail_outline.feature:4\n'
'\n'
' Scenario Outline: fill a web form # tests/functional/output_features/fail_outline/fail_outline.feature:6\n'
' Given I open browser at "http://www.my-website.com/" # tests/functional/output_features/fail_outline/fail_outline_steps.py:21\n'
' And click on "sign-up" # tests/functional/output_features/fail_outline/fail_outline_steps.py:25\n'
' When I fill the field "username" with "<username>" # tests/functional/output_features/fail_outline/fail_outline_steps.py:29\n'
' And I fill the field "password" with "<password>" # tests/functional/output_features/fail_outline/fail_outline_steps.py:29\n'
' And I fill the field "password-confirm" with "<password>" # tests/functional/output_features/fail_outline/fail_outline_steps.py:29\n'
' And I fill the field "email" with "<email>" # tests/functional/output_features/fail_outline/fail_outline_steps.py:29\n'
' And I click "done" # tests/functional/output_features/fail_outline/fail_outline_steps.py:33\n'
' Then I see the message "<message>" # tests/functional/output_features/fail_outline/fail_outline_steps.py:37\n'
'\n'
' Examples:\n'
' | username | password | email | message |\n'
' | john | doe-1234 | [email protected] | Welcome, John |\n'
' | mary | wee-9876 | [email protected] | Welcome, Mary |\n'
" Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 30, in when_i_fill_the_field_x_with_y\n'
" if field == 'password' and value == 'wee-9876': assert False\n"
" AssertionError\n"
' | foo | foo-bar | [email protected] | Welcome, Foo |\n'
'\n'
'1 feature (0 passed)\n'
'3 scenarios (2 passed)\n'
'24 steps (1 failed, 4 skipped, 19 passed)\n'
'\n'
'List of failed scenarios:\n'
' Scenario Outline: fill a web form # tests/functional/output_features/fail_outline/fail_outline.feature:6\n'
'\n' % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'fail_outline', 'fail_outline_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_output_with_failful_outline_colorful():
"With colored output, an unsuccessful outline scenario should print beautifully."
runner = Runner(feature_name('fail_outline'), verbosity=4)
runner.run()
assert_stdout_lines_with_traceback(
'\n'
'\033[1;37mFeature: Failful Scenario Outline \033[1;30m# tests/functional/output_features/fail_outline/fail_outline.feature:1\033[0m\n'
'\033[1;37m As lettuce author \033[1;30m# tests/functional/output_features/fail_outline/fail_outline.feature:2\033[0m\n'
'\033[1;37m In order to finish the first release \033[1;30m# tests/functional/output_features/fail_outline/fail_outline.feature:3\033[0m\n'
u'\033[1;37m I want to make scenario outlines work ♥ \033[1;30m# tests/functional/output_features/fail_outline/fail_outline.feature:4\033[0m\n'
'\n'
'\033[1;37m Scenario Outline: fill a web form \033[1;30m# tests/functional/output_features/fail_outline/fail_outline.feature:6\033[0m\n'
'\033[0;36m Given I open browser at "http://www.my-website.com/" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:21\033[0m\n'
'\033[0;36m And click on "sign-up" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:25\033[0m\n'
'\033[0;36m When I fill the field "username" with "<username>" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:29\033[0m\n'
'\033[0;36m And I fill the field "password" with "<password>" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:29\033[0m\n'
'\033[0;36m And I fill the field "password-confirm" with "<password>" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:29\033[0m\n'
'\033[0;36m And I fill the field "email" with "<email>" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:29\033[0m\n'
'\033[0;36m And I click "done" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:33\033[0m\n'
'\033[0;36m Then I see the message "<message>" \033[1;30m# tests/functional/output_features/fail_outline/fail_outline_steps.py:37\033[0m\n'
'\n'
'\033[1;37m Examples:\033[0m\n'
'\033[0;36m \033[1;37m |\033[0;36m username\033[1;37m |\033[0;36m password\033[1;37m |\033[0;36m email \033[1;37m |\033[0;36m message \033[1;37m |\033[0;36m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m john \033[1;37m |\033[1;32m doe-1234\033[1;37m |\033[1;32m [email protected]\033[1;37m |\033[1;32m Welcome, John\033[1;37m |\033[1;32m\033[0m\n'
'\033[1;32m \033[1;37m |\033[1;32m mary \033[1;37m |\033[1;32m wee-9876\033[1;37m |\033[1;32m [email protected]\033[1;37m |\033[1;32m Welcome, Mary\033[1;37m |\033[1;32m\033[0m\n'
"\033[1;31m Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 30, in when_i_fill_the_field_x_with_y\n'
" if field == 'password' and value == 'wee-9876': assert False\n"
" AssertionError\033[0m\n"
'\033[1;32m \033[1;37m |\033[1;32m foo \033[1;37m |\033[1;32m foo-bar \033[1;37m |\033[1;32m [email protected] \033[1;37m |\033[1;32m Welcome, Foo \033[1;37m |\033[1;32m\033[0m\n'
'\n'
"\033[1;37m1 feature (\033[0;31m0 passed\033[1;37m)\033[0m\n"
"\033[1;37m3 scenarios (\033[1;32m2 passed\033[1;37m)\033[0m\n"
"\033[1;37m24 steps (\033[0;31m1 failed\033[1;37m, \033[0;36m4 skipped\033[1;37m, \033[1;32m19 passed\033[1;37m)\033[0m\n"
"\n"
"\033[1;31mList of failed scenarios:\n"
"\033[0;31m Scenario Outline: fill a web form # tests/functional/output_features/fail_outline/fail_outline.feature:6\n"
"\033[0m\n" % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'fail_outline', 'fail_outline_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_output_snippets_with_groups_within_double_quotes_colorless():
"Testing that the proposed snippet is clever enough to identify groups within double quotes. colorless"
runner = Runner(feature_name('double-quoted-snippet'), verbosity=3)
runner.run()
assert_stdout_lines(
u'\n'
u'Feature: double-quoted snippet proposal # tests/functional/output_features/double-quoted-snippet/double-quoted-snippet.feature:1\n'
u'\n'
u' Scenario: Propose matched groups # tests/functional/output_features/double-quoted-snippet/double-quoted-snippet.feature:2\n'
u' Given I have "stuff here" and "more @#$%ˆ& bizar sutff h3r3" # tests/functional/output_features/double-quoted-snippet/double-quoted-snippet.feature:3 (undefined)\n'
u'\n'
u'1 feature (0 passed)\n'
u'1 scenario (0 passed)\n'
u'1 step (1 undefined, 0 passed)\n'
u'\n'
u'You can implement step definitions for undefined steps with these snippets:\n'
u'\n'
u"# -*- coding: utf-8 -*-\n"
u'from lettuce import step\n'
u'\n'
u'@step(u\'Given I have "([^\"]*)" and "([^\"]*)"\')\n'
u'def given_i_have_group1_and_group2(step, group1, group2):\n'
u' assert False, \'This step must be implemented\'\n'
)
@with_setup(prepare_stdout)
def test_output_snippets_with_groups_within_double_quotes_colorful():
"Testing that the proposed snippet is clever enough to identify groups within double quotes. colorful"
runner = Runner(feature_name('double-quoted-snippet'), verbosity=4)
runner.run()
assert_stdout_lines(
u'\n'
u'\033[1;37mFeature: double-quoted snippet proposal \033[1;30m# tests/functional/output_features/double-quoted-snippet/double-quoted-snippet.feature:1\033[0m\n'
u'\n'
u'\033[1;37m Scenario: Propose matched groups \033[1;30m# tests/functional/output_features/double-quoted-snippet/double-quoted-snippet.feature:2\033[0m\n'
u'\033[0;33m Given I have "stuff here" and "more @#$%ˆ& bizar sutff h3r3" \033[1;30m# tests/functional/output_features/double-quoted-snippet/double-quoted-snippet.feature:3\033[0m\n'
u'\n'
"\033[1;37m1 feature (\033[0;31m0 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 scenario (\033[0;31m0 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 step (\033[0;33m1 undefined\033[1;37m, \033[1;32m0 passed\033[1;37m)\033[0m\n"
u'\n'
u'\033[0;33mYou can implement step definitions for undefined steps with these snippets:\n'
u'\n'
u"# -*- coding: utf-8 -*-\n"
u'from lettuce import step\n'
u'\n'
u'@step(u\'Given I have "([^"]*)" and "([^"]*)"\')\n'
u'def given_i_have_group1_and_group2(step, group1, group2):\n'
u' assert False, \'This step must be implemented\'\033[0m\n'
)
@with_setup(prepare_stdout)
def test_output_snippets_with_groups_within_single_quotes_colorless():
"Testing that the proposed snippet is clever enough to identify groups within single quotes. colorless"
runner = Runner(feature_name('single-quoted-snippet'), verbosity=3)
runner.run()
assert_stdout_lines(
u'\n'
u'Feature: single-quoted snippet proposal # tests/functional/output_features/single-quoted-snippet/single-quoted-snippet.feature:1\n'
u'\n'
u' Scenario: Propose matched groups # tests/functional/output_features/single-quoted-snippet/single-quoted-snippet.feature:2\n'
u' Given I have \'stuff here\' and \'more @#$%ˆ& bizar sutff h3r3\' # tests/functional/output_features/single-quoted-snippet/single-quoted-snippet.feature:3 (undefined)\n'
u'\n'
u'1 feature (0 passed)\n'
u'1 scenario (0 passed)\n'
u'1 step (1 undefined, 0 passed)\n'
u'\n'
u'You can implement step definitions for undefined steps with these snippets:\n'
u'\n'
u"# -*- coding: utf-8 -*-\n"
u'from lettuce import step\n'
u'\n'
u'@step(u\'Given I have \\\'([^\\\']*)\\\' and \\\'([^\\\']*)\\\'\')\n'
u'def given_i_have_group1_and_group2(step, group1, group2):\n'
u' assert False, \'This step must be implemented\'\n'
)
@with_setup(prepare_stdout)
def test_output_snippets_with_groups_within_single_quotes_colorful():
"Testing that the proposed snippet is clever enough to identify groups within single quotes. colorful"
runner = Runner(feature_name('single-quoted-snippet'), verbosity=4)
runner.run()
assert_stdout_lines(
u'\n'
u'\033[1;37mFeature: single-quoted snippet proposal \033[1;30m# tests/functional/output_features/single-quoted-snippet/single-quoted-snippet.feature:1\033[0m\n'
u'\n'
u'\033[1;37m Scenario: Propose matched groups \033[1;30m# tests/functional/output_features/single-quoted-snippet/single-quoted-snippet.feature:2\033[0m\n'
u'\033[0;33m Given I have \'stuff here\' and \'more @#$%ˆ& bizar sutff h3r3\' \033[1;30m# tests/functional/output_features/single-quoted-snippet/single-quoted-snippet.feature:3\033[0m\n'
u'\n'
"\033[1;37m1 feature (\033[0;31m0 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 scenario (\033[0;31m0 passed\033[1;37m)\033[0m\n"
"\033[1;37m1 step (\033[0;33m1 undefined\033[1;37m, \033[1;32m0 passed\033[1;37m)\033[0m\n"
u'\n'
u'\033[0;33mYou can implement step definitions for undefined steps with these snippets:\n'
u'\n'
u"# -*- coding: utf-8 -*-\n"
u'from lettuce import step\n'
u'\n'
u'@step(u\'Given I have \\\'([^\\\']*)\\\' and \\\'([^\\\']*)\\\'\')\n'
u'def given_i_have_group1_and_group2(step, group1, group2):\n'
u' assert False, \'This step must be implemented\'\033[0m\n'
)
@with_setup(prepare_stdout)
def test_output_snippets_with_groups_within_redundant_quotes():
"Testing that the proposed snippet is clever enough to avoid duplicating the same snippet"
runner = Runner(feature_name('redundant-steps-quotes'), verbosity=3)
runner.run()
assert_stdout_lines(
u'\n'
u'Feature: avoid duplicating same snippet # tests/functional/output_features/redundant-steps-quotes/redundant-steps-quotes.feature:1\n'
u'\n'
u' Scenario: Propose matched groups # tests/functional/output_features/redundant-steps-quotes/redundant-steps-quotes.feature:2\n'
u' Given I have "stuff here" and "more @#$%ˆ& bizar sutff h3r3" # tests/functional/output_features/redundant-steps-quotes/redundant-steps-quotes.feature:3 (undefined)\n'
u' Given I have "blablabla" and "12345" # tests/functional/output_features/redundant-steps-quotes/redundant-steps-quotes.feature:4 (undefined)\n'
u'\n'
u'1 feature (0 passed)\n'
u'1 scenario (0 passed)\n'
u'2 steps (2 undefined, 0 passed)\n'
u'\n'
u'You can implement step definitions for undefined steps with these snippets:\n'
u'\n'
u"# -*- coding: utf-8 -*-\n"
u'from lettuce import step\n'
u'\n'
u'@step(u\'Given I have "([^"]*)" and "([^"]*)"\')\n'
u'def given_i_have_group1_and_group2(step, group1, group2):\n'
u' assert False, \'This step must be implemented\'\n'
)
@with_setup(prepare_stdout)
def test_output_snippets_with_normalized_unicode_names():
"Testing that the proposed snippet is clever enough normalize method names even with latin accents"
runner = Runner(feature_name('latin-accents'), verbosity=3)
runner.run()
assert_stdout_lines(
u"\n"
u"Funcionalidade: melhorar o output de snippets do lettuce # tests/functional/output_features/latin-accents/latin-accents.feature:2\n"
u" Como autor do lettuce # tests/functional/output_features/latin-accents/latin-accents.feature:3\n"
u" Eu quero ter um output refinado de snippets # tests/functional/output_features/latin-accents/latin-accents.feature:4\n"
u" Para melhorar, de uma forma geral, a vida do programador # tests/functional/output_features/latin-accents/latin-accents.feature:5\n"
u"\n"
u" Cenário: normalizar snippets com unicode # tests/functional/output_features/latin-accents/latin-accents.feature:7\n"
u" Dado que eu tenho palavrões e outras situações # tests/functional/output_features/latin-accents/latin-accents.feature:8 (undefined)\n"
u" E várias palavras acentuadas são úteis, tais como: \"(é,não,léo,chororó,chácara,epígrafo)\" # tests/functional/output_features/latin-accents/latin-accents.feature:9 (undefined)\n"
u" Então eu fico felizão # tests/functional/output_features/latin-accents/latin-accents.feature:10 (undefined)\n"
u"\n"
u"1 feature (0 passed)\n"
u"1 scenario (0 passed)\n"
u"3 steps (3 undefined, 0 passed)\n"
u"\n"
u"You can implement step definitions for undefined steps with these snippets:\n"
u"\n"
u"# -*- coding: utf-8 -*-\n"
u"from lettuce import step\n"
u"\n"
u"@step(u'Dado que eu tenho palavrões e outras situações')\n"
u"def dado_que_eu_tenho_palavroes_e_outras_situacoes(step):\n"
u" assert False, 'This step must be implemented'\n"
u"@step(u'E várias palavras acentuadas são úteis, tais como: \"([^\"]*)\"')\n"
u"def e_varias_palavras_acentuadas_sao_uteis_tais_como_group1(step, group1):\n"
u" assert False, 'This step must be implemented'\n"
u"@step(u'Então eu fico felizão')\n"
u"def entao_eu_fico_felizao(step):\n"
u" assert False, 'This step must be implemented'\n"
)
@with_setup(prepare_stdout)
def test_output_level_2_success():
'Output with verbosity 2 must show only the scenario names, followed by "... OK" in case of success'
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'many_successful_scenarios'), verbosity=2)
runner.run()
assert_stdout_lines(
"Do nothing ... OK\n"
"Do nothing (again) ... OK\n"
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"2 steps (2 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_level_2_fail():
'Output with verbosity 2 must show only the scenario names, followed by "... FAILED" in case of failure'
runner = Runner(feature_name('failed_table'), verbosity=2)
runner.run()
assert_stdout_lines_with_traceback(
"See it fail ... FAILED\n"
"\n"
"\n"
"<Step: \"And this one fails\">\n"
"Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 25, in tof\n'
" assert False\n"
"AssertionError\n"
"\n"
"1 feature (0 passed)\n"
"1 scenario (0 passed)\n"
"5 steps (1 failed, 2 skipped, 1 undefined, 1 passed)\n" % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'failed_table', 'failed_table_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_output_level_2_error():
'Output with verbosity 2 must show only the scenario names, followed by "... ERROR" in case of an error'
runner = Runner(feature_name('error_traceback'), verbosity=2)
runner.run()
assert_stdout_lines_with_traceback(
"It should pass ... OK\n"
"It should raise an exception different of AssertionError ... ERROR\n"
"\n"
"\n"
"<Step: \"Given my step that blows a exception\">\n"
"Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 10, in given_my_step_that_blows_a_exception\n'
" raise RuntimeError\n"
"RuntimeError\n"
"\n"
"1 feature (0 passed)\n"
"2 scenarios (1 passed)\n"
"2 steps (1 failed, 1 passed)\n" % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'error_traceback', 'error_traceback_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_output_level_1_success():
'Output with verbosity 1 must show only a dot per scenario ("." on success), followed by the summary'
runner = Runner(join(abspath(dirname(__file__)), 'output_features', 'many_successful_scenarios'), verbosity=1)
runner.run()
assert_stdout_lines(
".."
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"2 steps (2 passed)\n"
)
@with_setup(prepare_stdout)
def test_output_level_1_fail():
'Output with verbosity 1 must show "F" for a failing scenario, followed by the failure traceback and the summary'
runner = Runner(feature_name('failed_table'), verbosity=1)
runner.run()
assert_stdout_lines_with_traceback(
"F\n"
"\n"
"<Step: \"And this one fails\">\n"
"Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 25, in tof\n'
" assert False\n"
"AssertionError\n"
"\n"
"1 feature (0 passed)\n"
"1 scenario (0 passed)\n"
"5 steps (1 failed, 2 skipped, 1 undefined, 1 passed)\n" % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'failed_table', 'failed_table_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_output_level_1_error():
'Output with verbosity 1 must show "E" for a scenario that raises an error, followed by the traceback and the summary'
runner = Runner(feature_name('error_traceback'), verbosity=1)
runner.run()
assert_stdout_lines_with_traceback(
".E\n"
"\n"
"<Step: \"Given my step that blows a exception\">\n"
"Traceback (most recent call last):\n"
' File "%(lettuce_core_file)s", line %(call_line)d, in __call__\n'
" ret = self.function(self.step, *args, **kw)\n"
' File "%(step_file)s", line 10, in given_my_step_that_blows_a_exception\n'
" raise RuntimeError\n"
"RuntimeError\n"
"\n"
"1 feature (0 passed)\n"
"2 scenarios (1 passed)\n"
"2 steps (1 failed, 1 passed)\n" % {
'lettuce_core_file': lettuce_path('core.py'),
'step_file': abspath(lettuce_path('..', 'tests', 'functional', 'output_features', 'error_traceback', 'error_traceback_steps.py')),
'call_line': call_line,
}
)
@with_setup(prepare_stdout)
def test_commented_scenario():
'Test one commented scenario'
runner = Runner(feature_name('commented_feature'), verbosity=1)
runner.run()
assert_stdout_lines(
"."
"\n"
"1 feature (1 passed)\n"
"1 scenario (1 passed)\n"
"1 step (1 passed)\n"
)
@with_setup(prepare_stdout)
def test_blank_step_hash_value():
"syntax checking: Blank in step hash column = empty string"
from lettuce import step
@step('ignore step')
def ignore_step(step):
pass
@step('string length calc')
def string_lenth_calc(step):
for hash in step.hashes:
if len(hash["string"]) + len(hash["string2"]) != int(hash["length"]):
raise AssertionError("fail")
filename = syntax_feature_name('blank_values_in_hash')
runner = Runner(filename, verbosity=1)
runner.run()
assert_stdout_lines(
"."
"\n"
"1 feature (1 passed)\n"
"1 scenario (1 passed)\n"
"4 steps (4 passed)\n"
)
@with_setup(prepare_stdout)
def test_run_only_fast_tests():
"Runner can filter by tags"
from lettuce import step
good_one = Mock()
bad_one = Mock()
@step('I wait for 0 seconds')
def wait_for_0_seconds(step):
good_one(step.sentence)
@step('the time passed is 0 seconds')
def time_passed_0_sec(step):
good_one(step.sentence)
@step('I wait for 60 seconds')
def wait_for_60_seconds(step):
bad_one(step.sentence)
@step('the time passed is 1 minute')
def time_passed_1_min(step):
bad_one(step.sentence)
filename = tag_feature_name('timebound')
runner = Runner(filename, verbosity=1, tags=['fast-ish'])
runner.run()
assert_stdout_lines(
"."
"\n"
"1 feature (1 passed)\n"
"1 scenario (1 passed)\n"
"2 steps (2 passed)\n"
)
def test_run_random():
"Randomise the feature order"
path = fs.relpath(join(abspath(dirname(__file__)), 'no_features', 'unexistent-folder'))
runner = Runner(path, random=True)
assert_equals(True, runner.random)
with patch.object(random, 'shuffle') as pshuffle:
runner.run()
pshuffle.assert_called_once_with([])
@with_setup(prepare_stdout)
def test_background_with_header():
"Running background with header"
from lettuce import step, world
@step(ur'the variable "(\w+)" holds (\d+)')
def set_variable(step, name, value):
setattr(world, name, int(value))
@step(ur'the variable "(\w+)" is equal to (\d+)')
def check_variable(step, name, expected):
expected = int(expected)
expect(world).to.have.property(name).being.equal(expected)
@step(ur'the variable "(\w+)" times (\d+) is equal to (\d+)')
def multiply_and_verify(step, name, times, expected):
times = int(times)
expected = int(expected)
(getattr(world, name) * times).should.equal(expected)
filename = bg_feature_name('header')
runner = Runner(filename, verbosity=1)
runner.run()
assert_stdout_lines(
".."
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"7 steps (7 passed)\n"
)
@with_setup(prepare_stdout)
def test_background_without_header():
"Running background without header"
from lettuce import step, world, before, after
actions = {}
@before.each_background
def register_background_before(background):
actions['before'] = unicode(background)
@after.each_background
def register_background_after(background, results):
actions['after'] = {
'background': unicode(background),
'results': results,
}
@step(ur'the variable "(\w+)" holds (\d+)')
def set_variable(step, name, value):
setattr(world, name, int(value))
@step(ur'the variable "(\w+)" is equal to (\d+)')
def check_variable(step, name, expected):
expected = int(expected)
expect(world).to.have.property(name).being.equal(expected)
@step(ur'the variable "(\w+)" times (\d+) is equal to (\d+)')
def multiply_and_verify(step, name, times, expected):
times = int(times)
expected = int(expected)
(getattr(world, name) * times).should.equal(expected)
filename = bg_feature_name('naked')
runner = Runner(filename, verbosity=1)
runner.run()
assert_stdout_lines(
".."
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"7 steps (7 passed)\n"
)
expect(actions).to.equal({
'after': {
'results': [True],
'background': u'<Background for feature: Without Header>'
},
'before': u'<Background for feature: Without Header>'
})
@with_setup(prepare_stdout)
def test_output_background_with_success_colorless():
"A feature with background should print it accordingly under verbosity 3"
from lettuce import step
@step(ur'the variable "(\w+)" holds (\d+)')
@step(ur'the variable "(\w+)" is equal to (\d+)')
def just_pass(step, *args):
pass
filename = bg_feature_name('simple')
runner = Runner(filename, verbosity=3)
runner.run()
assert_stdout_lines(
'\n'
'Feature: Simple and successful # tests/functional/bg_features/simple/simple.feature:1\n'
' As the Lettuce maintainer # tests/functional/bg_features/simple/simple.feature:2\n'
' In order to make sure the output is pretty # tests/functional/bg_features/simple/simple.feature:3\n'
' I want to automate its test # tests/functional/bg_features/simple/simple.feature:4\n'
'\n'
' Background:\n'
' Given the variable "X" holds 2 # tests/functional/test_runner.py:1239\n'
'\n'
' Scenario: multiplication changing the value # tests/functional/bg_features/simple/simple.feature:9\n'
' Given the variable "X" is equal to 2 # tests/functional/test_runner.py:1239\n'
'\n'
'1 feature (1 passed)\n'
'1 scenario (1 passed)\n'
'1 step (1 passed)\n'
)
@with_setup(prepare_stdout)
def test_output_background_with_success_colorful():
"A feature with background should print it accordingly under verbosity 4"
from lettuce import step
@step(ur'the variable "(\w+)" holds (\d+)')
@step(ur'the variable "(\w+)" is equal to (\d+)')
def just_pass(step, *args):
pass
filename = bg_feature_name('simple')
runner = Runner(filename, verbosity=4)
runner.run()
assert_stdout_lines(
'\n'
'\033[1;37mFeature: Simple and successful \033[1;30m# tests/functional/bg_features/simple/simple.feature:1\033[0m\n'
'\033[1;37m As the Lettuce maintainer \033[1;30m# tests/functional/bg_features/simple/simple.feature:2\033[0m\n'
'\033[1;37m In order to make sure the output is pretty \033[1;30m# tests/functional/bg_features/simple/simple.feature:3\033[0m\n'
'\033[1;37m I want to automate its test \033[1;30m# tests/functional/bg_features/simple/simple.feature:4\033[0m\n'
'\n'
'\033[1;37m Background:\033[0m\n'
'\033[1;30m Given the variable "X" holds 2 \033[1;30m# tests/functional/test_runner.py:1274\033[0m\n'
'\033[A\033[1;32m Given the variable "X" holds 2 \033[1;30m# tests/functional/test_runner.py:1274\033[0m\n'
'\n'
'\033[1;37m Scenario: multiplication changing the value \033[1;30m# tests/functional/bg_features/simple/simple.feature:9\033[0m\n'
'\033[1;30m Given the variable "X" is equal to 2 \033[1;30m# tests/functional/test_runner.py:1274\033[0m\n'
'\033[A\033[1;32m Given the variable "X" is equal to 2 \033[1;30m# tests/functional/test_runner.py:1274\033[0m\n'
'\n'
'\033[1;37m1 feature (\033[1;32m1 passed\033[1;37m)\033[0m\n'
'\033[1;37m1 scenario (\033[1;32m1 passed\033[1;37m)\033[0m\n'
'\033[1;37m1 step (\033[1;32m1 passed\033[1;37m)\033[0m\n'
)
@with_setup(prepare_stdout)
def test_background_with_scenario_before_hook():
"Running background with before_scenario hook"
from lettuce import step, world, before
@before.each_scenario
def reset_variable(scenario):
world.X = None
@step(ur'the variable "(\w+)" holds (\d+)')
def set_variable(step, name, value):
setattr(world, name, int(value))
@step(ur'the variable "(\w+)" is equal to (\d+)')
def check_variable(step, name, expected):
expected = int(expected)
expect(world).to.have.property(name).being.equal(expected)
@step(ur'the variable "(\w+)" times (\d+) is equal to (\d+)')
def multiply_and_verify(step, name, times, expected):
times = int(times)
expected = int(expected)
(getattr(world, name) * times).should.equal(expected)
filename = bg_feature_name('header')
runner = Runner(filename, verbosity=1)
runner.run()
assert_stdout_lines(
".."
"\n"
"1 feature (1 passed)\n"
"2 scenarios (2 passed)\n"
"7 steps (7 passed)\n"
)
@with_setup(prepare_stderr)
def test_many_features_a_file():
"syntax checking: Fail if a file has more than one feature"
filename = syntax_feature_name('many_features_a_file')
runner = Runner(filename)
assert_raises(SystemExit, runner.run)
assert_stderr_lines(
'Syntax error at: %s\n'
'A feature file must contain ONLY ONE feature!\n' % filename
)
@with_setup(prepare_stderr)
def test_feature_without_name():
"syntax checking: Fail on features without name"
filename = syntax_feature_name('feature_without_name')
runner = Runner(filename)
assert_raises(SystemExit, runner.run)
assert_stderr_lines(
'Syntax error at: %s\n'
'Features must have a name. e.g: "Feature: This is my name"\n'
% filename
)
@with_setup(prepare_stderr)
def test_feature_missing_scenarios():
"syntax checking: Fail on features missing scenarios"
filename = syntax_feature_name("feature_missing_scenarios")
runner = Runner(filename)
assert_raises(SystemExit, runner.run)
assert_stderr_lines(
u"Syntax error at: %s\n"
"Features must have scenarios.\nPlease refer to the documentation "
"available at http://lettuce.it for more information.\n" % filename
)
@with_setup(prepare_stdout)
def test_output_with_undefined_steps_colorful():
"With colored output, an undefined step should be printed in sequence."
runner = Runner(feature_name('undefined_steps'), verbosity=4)
runner.run()
assert_stdout_lines_with_traceback(
'\n'
'\x1b[1;37mFeature: Test undefined steps are displayed on console \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.feature:1\x1b[0m\n'
'\n'
'\x1b[1;37m Scenario: Scenario with undefined step \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.feature:3\x1b[0m\n'
'\x1b[1;30m Given this test step passes \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.py:4\x1b[0m\n'
'\x1b[A\x1b[1;32m Given this test step passes \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.py:4\x1b[0m\n'
'\x1b[0;33m When this test step is undefined \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.feature:5\x1b[0m\n'
'\n'
'\x1b[1;37m Scenario Outline: Outline scenario with general undefined step \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.feature:7\x1b[0m\n'
'\x1b[0;36m Given this test step passes \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.py:4\x1b[0m\n'
'\x1b[0;33m When this test step is undefined \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.feature:5\x1b[0m\n'
'\x1b[0;36m Then <in> squared is <out> \x1b[1;30m# tests/functional/output_features/undefined_steps/undefined_steps.py:8\x1b[0m\n'
'\n'
'\x1b[1;37m Examples:\x1b[0m\n'
'\x1b[0;36m \x1b[1;37m |\x1b[0;36m in\x1b[1;37m |\x1b[0;36m out\x1b[1;37m |\x1b[0;36m\x1b[0m\n'
'\x1b[1;32m \x1b[1;37m |\x1b[1;32m 1 \x1b[1;37m |\x1b[1;32m 1 \x1b[1;37m |\x1b[1;32m\x1b[0m\n'
'\x1b[1;32m \x1b[1;37m |\x1b[1;32m 2 \x1b[1;37m |\x1b[1;32m 4 \x1b[1;37m |\x1b[1;32m\x1b[0m\n'
'\n'
'\x1b[1;37m1 feature (\x1b[0;31m0 passed\x1b[1;37m)\x1b[0m\n'
'\x1b[1;37m3 scenarios (\x1b[0;31m0 passed\x1b[1;37m)\x1b[0m\n'
'\x1b[1;37m8 steps (\x1b[0;36m2 skipped\x1b[1;37m, \x1b[0;33m3 undefined\x1b[1;37m, \x1b[1;32m3 passed\x1b[1;37m)\x1b[0m\n'
'\n'
'\x1b[0;33mYou can implement step definitions for undefined steps with these snippets:\n'
'\n'
'# -*- coding: utf-8 -*-\n'
'from lettuce import step\n'
'\n'
"@step(u'When this test step is undefined')\n"
'def when_this_test_step_is_undefined(step):\n'
" assert False, 'This step must be implemented'\x1b[0m\n"
)
| gpl-3.0 | -5,035,576,027,792,273,000 | 51.14446 | 197 | 0.615818 | false |
Vignesh2208/Awlsim | awlsim/core/systemblocks/systemblocks.py | 1 | 3090 | # -*- coding: utf-8 -*-
#
# AWL simulator - System-blocks
#
# Copyright 2012-2015 Michael Buesch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
#from awlsim.core.instructions.insn_generic_call cimport * #@cy
from awlsim.core.instructions.insn_generic_call import * #@nocy
from awlsim.core.blocks import *
from awlsim.core.translator import *
from awlsim.core.identifier import *
class SystemBlock(StaticCodeBlock):
# The block identification. To be overridden by the subclass.
# The tuple is: (number, name, short_description)
name = (-1, "<unknown>", None)
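# For example, a hypothetical subclass could declare:
#   name = (46, "STP", "stop the CPU")
# (illustrative values only, not taken from this file)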
isSystemBlock = True
def __init__(self, cpu, interface):
insns = [
AwlInsn_GENERIC_CALL(cpu, self.run),
]
StaticCodeBlock.__init__(self, insns, self.name[0], interface)
self.cpu = cpu
def run(self):
# Reimplement this method
raise NotImplementedError
# Fetch the value of a block-interface field.
def fetchInterfaceFieldByName(self, name):
return self.cpu.fetch(self.__interfaceOpers[name])
# Store a value to a block-interface field.
def storeInterfaceFieldByName(self, name, value):
return self.cpu.store(self.__interfaceOpers[name], value)
# Resolve hard wired symbolic accesses
# (i.e. accesses not done in AWL instructions)
def resolveSymbols(self):
super(SystemBlock, self).resolveSymbols()
resolver = AwlSymResolver(self.cpu)
self.__interfaceOpers = {}
for field in self.interface.fields_IN_OUT_INOUT_STAT:
# Create a scratch-operator for the access.
offset = AwlOffset(None, None)
offset.identChain = AwlDataIdentChain.parseString(field.name)
oper = AwlOperator(AwlOperator.NAMED_LOCAL, 0,
offset)
# Resolve the scratch-operator.
oper = resolver.resolveNamedLocal(block=self, insn=None,
oper=oper, pointer=False,allowWholeArrayAccess=True)
# Store the scratch operator for later use.
self.__interfaceOpers[field.name] = oper
class SFBInterface(FBInterface):
pass
class SFB(SystemBlock):
isFB = True
def __init__(self, cpu):
SystemBlock.__init__(self, cpu, SFBInterface())
def __repr__(self):
return "SFB %d" % self.index
class SFCInterface(FCInterface):
pass
class SFC(SystemBlock):
isFC = True
def __init__(self, cpu):
SystemBlock.__init__(self, cpu, SFCInterface())
def __repr__(self):
return "SFC %d" % self.index
| gpl-2.0 | -159,100,157,252,867,070 | 30.212121 | 82 | 0.731392 | false |
dscho/hg | i18n/check-translation.py | 1 | 7912 | #!/usr/bin/env python
#
# check-translation.py - check Mercurial specific translation problems
import polib
import re
scanners = []
checkers = []
def scanner():
def decorator(func):
scanners.append(func)
return func
return decorator
def levelchecker(level, msgidpat):
def decorator(func):
if msgidpat:
match = re.compile(msgidpat).search
else:
match = lambda msgid: True
checkers.append((func, level))
func.match = match
return func
return decorator
def match(checker, pe):
"""Examine whether POEntry "pe" is target of specified checker or not
"""
if not checker.match(pe.msgid):
return
# examine suppression by translator comment
nochecker = 'no-%s-check' % checker.__name__
for tc in pe.tcomment.split():
if nochecker == tc:
return
return True
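# For example, a po entry whose translator comment contains "no-promptchoice-check"
# is skipped by the promptchoice checker defined below.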
####################
def fatalchecker(msgidpat=None):
return levelchecker('fatal', msgidpat)
@fatalchecker(r'\$\$')
def promptchoice(pe):
"""Check translation of the string given to "ui.promptchoice()"
>>> pe = polib.POEntry(
... msgid ='prompt$$missing &sep$$missing &$$followed by &none',
... msgstr='prompt missing &sep$$missing amp$$followed by none&')
>>> match(promptchoice, pe)
True
>>> for e in promptchoice(pe): print e
number of choices differs between msgid and msgstr
msgstr has invalid choice missing '&'
msgstr has invalid '&' followed by none
"""
idchoices = [c.rstrip(' ') for c in pe.msgid.split('$$')[1:]]
strchoices = [c.rstrip(' ') for c in pe.msgstr.split('$$')[1:]]
if len(idchoices) != len(strchoices):
yield "number of choices differs between msgid and msgstr"
indices = [(c, c.find('&')) for c in strchoices]
if [c for c, i in indices if i == -1]:
yield "msgstr has invalid choice missing '&'"
if [c for c, i in indices if len(c) == i + 1]:
yield "msgstr has invalid '&' followed by none"
deprecatedpe = None
@scanner()
def deprecatedsetup(pofile):
pes = [p for p in pofile if p.msgid == '(DEPRECATED)' and p.msgstr]
if len(pes):
global deprecatedpe
deprecatedpe = pes[0]
@fatalchecker(r'\(DEPRECATED\)')
def deprecated(pe):
"""Check for DEPRECATED
>>> ped = polib.POEntry(
... msgid = '(DEPRECATED)',
... msgstr= '(DETACERPED)')
>>> deprecatedsetup([ped])
>>> pe = polib.POEntry(
... msgid = 'Something (DEPRECATED)',
... msgstr= 'something (DEPRECATED)')
>>> match(deprecated, pe)
True
>>> for e in deprecated(pe): print e
>>> pe = polib.POEntry(
... msgid = 'Something (DEPRECATED)',
... msgstr= 'something (DETACERPED)')
>>> match(deprecated, pe)
True
>>> for e in deprecated(pe): print e
>>> pe = polib.POEntry(
... msgid = 'Something (DEPRECATED)',
... msgstr= 'something')
>>> match(deprecated, pe)
True
>>> for e in deprecated(pe): print e
msgstr inconsistently translated (DEPRECATED)
>>> pe = polib.POEntry(
... msgid = 'Something (DEPRECATED, foo bar)',
... msgstr= 'something (DETACERPED, foo bar)')
>>> match(deprecated, pe)
"""
if not ('(DEPRECATED)' in pe.msgstr or
(deprecatedpe and
deprecatedpe.msgstr in pe.msgstr)):
yield "msgstr inconsistently translated (DEPRECATED)"
####################
def warningchecker(msgidpat=None):
return levelchecker('warning', msgidpat)
@warningchecker()
def taildoublecolons(pe):
"""Check equality of tail '::'-ness between msgid and msgstr
>>> pe = polib.POEntry(
... msgid ='ends with ::',
... msgstr='ends with ::')
>>> for e in taildoublecolons(pe): print e
>>> pe = polib.POEntry(
... msgid ='ends with ::',
... msgstr='ends without double-colons')
>>> for e in taildoublecolons(pe): print e
tail '::'-ness differs between msgid and msgstr
>>> pe = polib.POEntry(
... msgid ='ends without double-colons',
... msgstr='ends with ::')
>>> for e in taildoublecolons(pe): print e
tail '::'-ness differs between msgid and msgstr
"""
if pe.msgid.endswith('::') != pe.msgstr.endswith('::'):
yield "tail '::'-ness differs between msgid and msgstr"
@warningchecker()
def indentation(pe):
"""Check equality of initial indentation between msgid and msgstr
This may report unexpected warnings, because it is not aware of
the rst document syntax or the context of the msgstr.
>>> pe = polib.POEntry(
... msgid =' indented text',
... msgstr=' narrowed indentation')
>>> for e in indentation(pe): print e
initial indentation width differs between msgid and msgstr
"""
idindent = len(pe.msgid) - len(pe.msgid.lstrip())
strindent = len(pe.msgstr) - len(pe.msgstr.lstrip())
if idindent != strindent:
yield "initial indentation width differs betweeen msgid and msgstr"
####################
def check(pofile, fatal=True, warning=False):
targetlevel = { 'fatal': fatal, 'warning': warning }
targetcheckers = [(checker, level)
for checker, level in checkers
if targetlevel[level]]
if not targetcheckers:
return []
detected = []
for checker in scanners:
checker(pofile)
for pe in pofile.translated_entries():
errors = []
for checker, level in targetcheckers:
if match(checker, pe):
errors.extend((level, checker.__name__, error)
for error in checker(pe))
if errors:
detected.append((pe, errors))
return detected
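# Note: check() returns a list of (POEntry, errors) pairs, where each error is a
# (level, checker_name, message) tuple; the __main__ block below reports them
# as "filename:linenum:level(checker): message".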
########################################
if __name__ == "__main__":
import sys
import optparse
optparser = optparse.OptionParser("""%prog [options] pofile ...
This checks Mercurial-specific translation problems in the specified
'*.po' files.
Each detected problem is shown in the format below::
filename:linenum:type(checker): problem detail .....
"type" is "fatal" or "warning". "checker" is the name of the function
that detected the corresponding error.
Checking by checker "foo" on a specific msgstr can be suppressed by
a "translator comment" like the one below. Multiple "no-xxxx-check"
markers should be separated by whitespace::
# no-foo-check
msgid = "....."
msgstr = "....."
""")
optparser.add_option("", "--warning",
help="show also warning level problems",
action="store_true")
optparser.add_option("", "--doctest",
help="run doctest of this tool, instead of check",
action="store_true")
(options, args) = optparser.parse_args()
if options.doctest:
import os
if 'TERM' in os.environ:
del os.environ['TERM']
import doctest
failures, tests = doctest.testmod()
sys.exit(failures and 1 or 0)
# replace polib._POFileParser to show linenum of problematic msgstr
class ExtPOFileParser(polib._POFileParser):
def process(self, symbol, linenum):
super(ExtPOFileParser, self).process(symbol, linenum)
if symbol == 'MS': # msgstr
self.current_entry.linenum = linenum
polib._POFileParser = ExtPOFileParser
detected = []
warning = options.warning
for f in args:
detected.extend((f, pe, errors)
for pe, errors in check(polib.pofile(f),
warning=warning))
if detected:
for f, pe, errors in detected:
for level, checker, error in errors:
sys.stderr.write('%s:%d:%s(%s): %s\n'
% (f, pe.linenum, level, checker, error))
sys.exit(1)
| gpl-2.0 | 8,894,772,293,302,084,000 | 31.42623 | 75 | 0.587715 | false |
moverlan/LOTlib | LOTlib/Hypotheses/RecursiveLOTHypothesis.py | 1 | 2953 |
from LOTHypothesis import LOTHypothesis, raise_exception, evaluate_expression
from LOTlib.Evaluation.EvaluationException import RecursionDepthException, TooBigException, EvaluationException
class RecursiveLOTHypothesis(LOTHypothesis):
"""
A LOTHypothesis that permits recursive calls to itself via the primitive "recurse" (previously, L).
Here, RecursiveLOTHypothesis.__call__ does essentially the same thing as LOTHypothesis.__call__, but it binds
the symbol "recurse" to RecursiveLOTHypothesis.recursive_call so that recursion is processed internally.
For a Demo, see LOTlib.Examples.Number
NOTE: Pre Nov2014, this was computed with some fanciness in evaluate_expression that automatically appended the Y combinator.
This change was made to simplify and speed things up.
"""
def __init__(self, grammar, recurse='recurse_', recurse_bound=25, args=['x'], **kwargs):
"""
Initializer. recurse gives the name for the recursion operation internally.
"""
# save recurse symbol
self.recurse = recurse
self.recursive_depth_bound = recurse_bound # how deep can we recurse?
self.recursive_call_depth = 0 # how far down have we recursed?
# automatically put 'recurse' onto kwargs['args']
assert recurse not in args # not already specified
args = [recurse] + args
LOTHypothesis.__init__(self, grammar, args=args, **kwargs)
def recursive_call(self, *args):
"""
This gets called internally on recursive calls. It keeps track of the depth to allow us to escape
"""
self.recursive_call_depth += 1
if self.recursive_call_depth > self.recursive_depth_bound:
raise RecursionDepthException
return LOTHypothesis.__call__(self, *args)
def __call__(self, *args):
"""
The main calling function. Resets recursive_call_depth and then calls
"""
self.recursive_call_depth = 0
return LOTHypothesis.__call__(self, *args)
def compile_function(self):
"""
Called in set_value to compile into a function. By default we pass
recursive_call as the argument bound to "recurse", so that recursive
calls go through self.recursive_call.
"""
if self.value.count_nodes() > self.maxnodes:
return (lambda *args: raise_exception(TooBigException))
else:
try:
# Here, we evaluate it, and then defaultly pass recursive_call as the first "recurse"
f = evaluate_expression(str(self))
return lambda *args: f(self.recursive_call, *args)
except Exception as e:
print "# Warning: failed to execute evaluate_expression on " + str(self)
print "# ", e
return (lambda *args: raise_exception(EvaluationException) ) | gpl-3.0 | -8,645,650,876,244,384,000 | 42.441176 | 129 | 0.655266 | false |
Azure/azure-sdk-for-python | sdk/synapse/azure-synapse-accesscontrol/azure/synapse/accesscontrol/models/__init__.py | 1 | 2298 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import CheckAccessDecision
from ._models_py3 import CheckPrincipalAccessRequest
from ._models_py3 import CheckPrincipalAccessResponse
from ._models_py3 import ErrorContract
from ._models_py3 import ErrorDetail
from ._models_py3 import ErrorResponse
from ._models_py3 import RequiredAction
from ._models_py3 import RoleAssignmentDetails
from ._models_py3 import RoleAssignmentDetailsList
from ._models_py3 import RoleAssignmentRequest
from ._models_py3 import SubjectInfo
from ._models_py3 import SynapseRbacPermission
from ._models_py3 import SynapseRoleDefinition
except (SyntaxError, ImportError):
from ._models import CheckAccessDecision # type: ignore
from ._models import CheckPrincipalAccessRequest # type: ignore
from ._models import CheckPrincipalAccessResponse # type: ignore
from ._models import ErrorContract # type: ignore
from ._models import ErrorDetail # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import RequiredAction # type: ignore
from ._models import RoleAssignmentDetails # type: ignore
from ._models import RoleAssignmentDetailsList # type: ignore
from ._models import RoleAssignmentRequest # type: ignore
from ._models import SubjectInfo # type: ignore
from ._models import SynapseRbacPermission # type: ignore
from ._models import SynapseRoleDefinition # type: ignore
__all__ = [
'CheckAccessDecision',
'CheckPrincipalAccessRequest',
'CheckPrincipalAccessResponse',
'ErrorContract',
'ErrorDetail',
'ErrorResponse',
'RequiredAction',
'RoleAssignmentDetails',
'RoleAssignmentDetailsList',
'RoleAssignmentRequest',
'SubjectInfo',
'SynapseRbacPermission',
'SynapseRoleDefinition',
]
| mit | 1,541,626,679,146,592,500 | 43.192308 | 94 | 0.696258 | false |
ArcherSys/ArcherSys | Lib/multiprocessing/connection.py | 1 | 91847 | #
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
import io
import os
import sys
import socket
import struct
import time
import tempfile
import itertools
import _multiprocessing
from . import reduction
from . import util
from . import AuthenticationError, BufferTooShort
from .reduction import ForkingPickler
try:
import _winapi
from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
except ImportError:
if sys.platform == 'win32':
raise
_winapi = None
#
#
#
BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.
_mmap_counter = itertools.count()
default_family = 'AF_INET'
families = ['AF_INET']
if hasattr(socket, 'AF_UNIX'):
default_family = 'AF_UNIX'
families += ['AF_UNIX']
if sys.platform == 'win32':
default_family = 'AF_PIPE'
families += ['AF_PIPE']
def _init_timeout(timeout=CONNECTION_TIMEOUT):
return time.time() + timeout
def _check_timeout(t):
return time.time() > t
#
#
#
def arbitrary_address(family):
'''
Return an arbitrary free address for the given family
'''
if family == 'AF_INET':
return ('localhost', 0)
elif family == 'AF_UNIX':
return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
(os.getpid(), next(_mmap_counter)), dir="")
else:
raise ValueError('unrecognized family')
def _validate_family(family):
'''
Checks if the family is valid for the current environment.
'''
if sys.platform != 'win32' and family == 'AF_PIPE':
raise ValueError('Family %s is not recognized.' % family)
if sys.platform == 'win32' and family == 'AF_UNIX':
# double check
if not hasattr(socket, family):
raise ValueError('Family %s is not recognized.' % family)
def address_type(address):
'''
Return the types of the address
This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
'''
if type(address) == tuple:
return 'AF_INET'
elif type(address) is str and address.startswith('\\\\'):
return 'AF_PIPE'
elif type(address) is str:
return 'AF_UNIX'
else:
raise ValueError('address type of %r unrecognized' % address)
#
# Connection classes
#
class _ConnectionBase:
_handle = None
def __init__(self, handle, readable=True, writable=True):
handle = handle.__index__()
if handle < 0:
raise ValueError("invalid handle")
if not readable and not writable:
raise ValueError(
"at least one of `readable` and `writable` must be True")
self._handle = handle
self._readable = readable
self._writable = writable
# XXX should we use util.Finalize instead of a __del__?
def __del__(self):
if self._handle is not None:
self._close()
def _check_closed(self):
if self._handle is None:
raise OSError("handle is closed")
def _check_readable(self):
if not self._readable:
raise OSError("connection is write-only")
def _check_writable(self):
if not self._writable:
raise OSError("connection is read-only")
def _bad_message_length(self):
if self._writable:
self._readable = False
else:
self.close()
raise OSError("bad message length")
@property
def closed(self):
"""True if the connection is closed"""
return self._handle is None
@property
def readable(self):
"""True if the connection is readable"""
return self._readable
@property
def writable(self):
"""True if the connection is writable"""
return self._writable
def fileno(self):
"""File descriptor or handle of the connection"""
self._check_closed()
return self._handle
def close(self):
"""Close the connection"""
if self._handle is not None:
try:
self._close()
finally:
self._handle = None
def send_bytes(self, buf, offset=0, size=None):
"""Send the bytes data from a bytes-like object"""
self._check_closed()
self._check_writable()
m = memoryview(buf)
# HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
if m.itemsize > 1:
m = memoryview(bytes(m))
n = len(m)
if offset < 0:
raise ValueError("offset is negative")
if n < offset:
raise ValueError("buffer length < offset")
if size is None:
size = n - offset
elif size < 0:
raise ValueError("size is negative")
elif offset + size > n:
raise ValueError("buffer length < offset + size")
self._send_bytes(m[offset:offset + size])
def send(self, obj):
"""Send a (picklable) object"""
self._check_closed()
self._check_writable()
self._send_bytes(ForkingPickler.dumps(obj))
def recv_bytes(self, maxlength=None):
"""
Receive bytes data as a bytes object.
"""
self._check_closed()
self._check_readable()
if maxlength is not None and maxlength < 0:
raise ValueError("negative maxlength")
buf = self._recv_bytes(maxlength)
if buf is None:
self._bad_message_length()
return buf.getvalue()
def recv_bytes_into(self, buf, offset=0):
"""
Receive bytes data into a writeable bytes-like object.
Return the number of bytes read.
"""
self._check_closed()
self._check_readable()
with memoryview(buf) as m:
# Get bytesize of arbitrary buffer
itemsize = m.itemsize
bytesize = itemsize * len(m)
if offset < 0:
raise ValueError("negative offset")
elif offset > bytesize:
raise ValueError("offset too large")
result = self._recv_bytes()
size = result.tell()
if bytesize < offset + size:
raise BufferTooShort(result.getvalue())
# Message can fit in dest
result.seek(0)
result.readinto(m[offset // itemsize :
(offset + size) // itemsize])
return size
def recv(self):
"""Receive a (picklable) object"""
self._check_closed()
self._check_readable()
buf = self._recv_bytes()
return ForkingPickler.loads(buf.getbuffer())
def poll(self, timeout=0.0):
"""Whether there is any input available to be read"""
self._check_closed()
self._check_readable()
return self._poll(timeout)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
if _winapi:
class PipeConnection(_ConnectionBase):
"""
Connection class based on a Windows named pipe.
Overlapped I/O is used, so the handles must have been created
with FILE_FLAG_OVERLAPPED.
"""
_got_empty_message = False
def _close(self, _CloseHandle=_winapi.CloseHandle):
_CloseHandle(self._handle)
def _send_bytes(self, buf):
ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
waitres = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
assert waitres == WAIT_OBJECT_0
except:
ov.cancel()
raise
finally:
nwritten, err = ov.GetOverlappedResult(True)
assert err == 0
assert nwritten == len(buf)
def _recv_bytes(self, maxsize=None):
if self._got_empty_message:
self._got_empty_message = False
return io.BytesIO()
else:
bsize = 128 if maxsize is None else min(maxsize, 128)
try:
ov, err = _winapi.ReadFile(self._handle, bsize,
overlapped=True)
try:
if err == _winapi.ERROR_IO_PENDING:
waitres = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
assert waitres == WAIT_OBJECT_0
except:
ov.cancel()
raise
finally:
nread, err = ov.GetOverlappedResult(True)
if err == 0:
f = io.BytesIO()
f.write(ov.getbuffer())
return f
elif err == _winapi.ERROR_MORE_DATA:
return self._get_more_data(ov, maxsize)
except OSError as e:
if e.winerror == _winapi.ERROR_BROKEN_PIPE:
raise EOFError
else:
raise
raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")
def _poll(self, timeout):
if (self._got_empty_message or
_winapi.PeekNamedPipe(self._handle)[0] != 0):
return True
return bool(wait([self], timeout))
def _get_more_data(self, ov, maxsize):
buf = ov.getbuffer()
f = io.BytesIO()
f.write(buf)
left = _winapi.PeekNamedPipe(self._handle)[1]
assert left > 0
if maxsize is not None and len(buf) + left > maxsize:
self._bad_message_length()
ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
rbytes, err = ov.GetOverlappedResult(True)
assert err == 0
assert rbytes == left
f.write(ov.getbuffer())
return f
class Connection(_ConnectionBase):
"""
Connection class based on an arbitrary file descriptor (Unix only), or
a socket handle (Windows).
"""
if _winapi:
def _close(self, _close=_multiprocessing.closesocket):
_close(self._handle)
_write = _multiprocessing.send
_read = _multiprocessing.recv
else:
def _close(self, _close=os.close):
_close(self._handle)
_write = os.write
_read = os.read
def _send(self, buf, write=_write):
remaining = len(buf)
while True:
try:
n = write(self._handle, buf)
except InterruptedError:
continue
remaining -= n
if remaining == 0:
break
buf = buf[n:]
def _recv(self, size, read=_read):
buf = io.BytesIO()
handle = self._handle
remaining = size
while remaining > 0:
try:
chunk = read(handle, remaining)
except InterruptedError:
continue
n = len(chunk)
if n == 0:
if remaining == size:
raise EOFError
else:
raise OSError("got end of file during message")
buf.write(chunk)
remaining -= n
return buf
def _send_bytes(self, buf):
n = len(buf)
# For wire compatibility with 3.2 and lower
header = struct.pack("!i", n)
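# Worked example: sending b"hello" puts b"\x00\x00\x00\x05" + b"hello" on the
# wire: a 4-byte big-endian length prefix followed by the payload.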
if n > 16384:
# The payload is large so Nagle's algorithm won't be triggered
# and we'd better avoid the cost of concatenation.
chunks = [header, buf]
elif n > 0:
# Issue # 20540: concatenate before sending, to avoid delays due
# to Nagle's algorithm on a TCP socket.
chunks = [header + buf]
else:
# This code path is necessary to avoid "broken pipe" errors
# when sending a 0-length buffer if the other end closed the pipe.
chunks = [header]
for chunk in chunks:
self._send(chunk)
def _recv_bytes(self, maxsize=None):
buf = self._recv(4)
size, = struct.unpack("!i", buf.getvalue())
if maxsize is not None and size > maxsize:
return None
return self._recv(size)
def _poll(self, timeout):
r = wait([self], timeout)
return bool(r)
#
# Public functions
#
class Listener(object):
'''
Returns a listener object.
This is a wrapper for a bound socket which is 'listening' for
connections, or for a Windows named pipe.
'''
def __init__(self, address=None, family=None, backlog=1, authkey=None):
family = family or (address and address_type(address)) \
or default_family
address = address or arbitrary_address(family)
_validate_family(family)
if family == 'AF_PIPE':
self._listener = PipeListener(address, backlog)
else:
self._listener = SocketListener(address, family, backlog)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')
self._authkey = authkey
def accept(self):
'''
Accept a connection on the bound socket or named pipe of `self`.
Returns a `Connection` object.
'''
if self._listener is None:
raise OSError('listener is closed')
c = self._listener.accept()
if self._authkey:
deliver_challenge(c, self._authkey)
answer_challenge(c, self._authkey)
return c
def close(self):
'''
Close the bound socket or named pipe of `self`.
'''
if self._listener is not None:
self._listener.close()
self._listener = None
address = property(lambda self: self._listener._address)
last_accepted = property(lambda self: self._listener._last_accepted)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def Client(address, family=None, authkey=None):
'''
Returns a connection to the address of a `Listener`
'''
family = family or address_type(address)
_validate_family(family)
if family == 'AF_PIPE':
c = PipeClient(address)
else:
c = SocketClient(address)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')
if authkey is not None:
answer_challenge(c, authkey)
deliver_challenge(c, authkey)
return c
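# Minimal usage sketch (hypothetical address and key; run each half in its own process):
#   with Listener(('localhost', 6000), authkey=b'secret') as listener:
#       with listener.accept() as conn:
#           print(conn.recv())
#   with Client(('localhost', 6000), authkey=b'secret') as conn:
#       conn.send('hello')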
if sys.platform != 'win32':
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
if duplex:
s1, s2 = socket.socketpair()
s1.setblocking(True)
s2.setblocking(True)
c1 = Connection(s1.detach())
c2 = Connection(s2.detach())
else:
fd1, fd2 = os.pipe()
c1 = Connection(fd1, writable=False)
c2 = Connection(fd2, readable=False)
return c1, c2
else:
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
address = arbitrary_address('AF_PIPE')
if duplex:
openmode = _winapi.PIPE_ACCESS_DUPLEX
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
obsize, ibsize = BUFSIZE, BUFSIZE
else:
openmode = _winapi.PIPE_ACCESS_INBOUND
access = _winapi.GENERIC_WRITE
obsize, ibsize = 0, BUFSIZE
h1 = _winapi.CreateNamedPipe(
address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
_winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
# default security descriptor: the handle cannot be inherited
_winapi.NULL
)
h2 = _winapi.CreateFile(
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
_winapi.SetNamedPipeHandleState(
h2, _winapi.PIPE_READMODE_MESSAGE, None, None
)
overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
_, err = overlapped.GetOverlappedResult(True)
assert err == 0
c1 = PipeConnection(h1, writable=duplex)
c2 = PipeConnection(h2, readable=duplex)
return c1, c2
#
# Definitions for connections based on sockets
#
class SocketListener(object):
'''
Representation of a socket which is bound to an address and listening
'''
def __init__(self, address, family, backlog=1):
self._socket = socket.socket(getattr(socket, family))
try:
# SO_REUSEADDR has different semantics on Windows (issue #2550).
if os.name == 'posix':
self._socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self._socket.setblocking(True)
self._socket.bind(address)
self._socket.listen(backlog)
self._address = self._socket.getsockname()
except OSError:
self._socket.close()
raise
self._family = family
self._last_accepted = None
if family == 'AF_UNIX':
self._unlink = util.Finalize(
self, os.unlink, args=(address,), exitpriority=0
)
else:
self._unlink = None
def accept(self):
while True:
try:
s, self._last_accepted = self._socket.accept()
except InterruptedError:
pass
else:
break
s.setblocking(True)
return Connection(s.detach())
def close(self):
self._socket.close()
if self._unlink is not None:
self._unlink()
def SocketClient(address):
'''
Return a connection object connected to the socket given by `address`
'''
family = address_type(address)
with socket.socket( getattr(socket, family) ) as s:
s.setblocking(True)
s.connect(address)
return Connection(s.detach())
#
# Definitions for connections based on named pipes
#
if sys.platform == 'win32':
class PipeListener(object):
'''
Representation of a named pipe
'''
def __init__(self, address, backlog=None):
self._address = address
self._handle_queue = [self._new_handle(first=True)]
self._last_accepted = None
util.sub_debug('listener created with address=%r', self._address)
self.close = util.Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
def _new_handle(self, first=False):
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
if first:
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
return _winapi.CreateNamedPipe(
self._address, flags,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
_winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
)
def accept(self):
self._handle_queue.append(self._new_handle())
handle = self._handle_queue.pop(0)
try:
ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
except OSError as e:
if e.winerror != _winapi.ERROR_NO_DATA:
raise
# ERROR_NO_DATA can occur if a client has already connected,
# written data and then disconnected -- see Issue 14725.
else:
try:
res = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
except:
ov.cancel()
_winapi.CloseHandle(handle)
raise
finally:
_, err = ov.GetOverlappedResult(True)
assert err == 0
return PipeConnection(handle)
@staticmethod
def _finalize_pipe_listener(queue, address):
util.sub_debug('closing listener with address=%r', address)
for handle in queue:
_winapi.CloseHandle(handle)
def PipeClient(address):
'''
Return a connection object connected to the pipe given by `address`
'''
t = _init_timeout()
while 1:
try:
_winapi.WaitNamedPipe(address, 1000)
h = _winapi.CreateFile(
address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
except OSError as e:
if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
_winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
raise
else:
break
else:
raise
_winapi.SetNamedPipeHandleState(
h, _winapi.PIPE_READMODE_MESSAGE, None, None
)
return PipeConnection(h)
#
# Authentication stuff
#
MESSAGE_LENGTH = 20
CHALLENGE = b'#CHALLENGE#'
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'
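# Handshake implemented by the two helpers below (both ends share `authkey`):
#   challenger -> responder: b'#CHALLENGE#' + 20 random bytes
#   responder -> challenger: HMAC-MD5(authkey, those 20 bytes)
#   challenger -> responder: b'#WELCOME#' on a match, b'#FAILURE#' otherwise
# Listener.accept() and Client() each run the exchange in both directions,
# so authentication is mutual.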
def deliver_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = os.urandom(MESSAGE_LENGTH)
connection.send_bytes(CHALLENGE + message)
digest = hmac.new(authkey, message, 'md5').digest()
response = connection.recv_bytes(256) # reject large message
if response == digest:
connection.send_bytes(WELCOME)
else:
connection.send_bytes(FAILURE)
raise AuthenticationError('digest received was wrong')
def answer_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = connection.recv_bytes(256) # reject large message
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
message = message[len(CHALLENGE):]
digest = hmac.new(authkey, message, 'md5').digest()
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != WELCOME:
raise AuthenticationError('digest sent was rejected')
#
# Support for using xmlrpclib for serialization
#
class ConnectionWrapper(object):
def __init__(self, conn, dumps, loads):
self._conn = conn
self._dumps = dumps
self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
setattr(self, attr, obj)
def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)
def recv(self):
s = self._conn.recv_bytes()
return self._loads(s)
def _xml_dumps(obj):
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
def _xml_loads(s):
(obj,), method = xmlrpclib.loads(s.decode('utf-8'))
return obj
class XmlListener(Listener):
def accept(self):
global xmlrpclib
import xmlrpc.client as xmlrpclib
obj = Listener.accept(self)
return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
def XmlClient(*args, **kwds):
global xmlrpclib
import xmlrpc.client as xmlrpclib
return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
#
# Wait
#
if sys.platform == 'win32':
def _exhaustive_wait(handles, timeout):
# Return ALL handles which are currently signalled. (Only
# returning the first signalled might create starvation issues.)
L = list(handles)
ready = []
while L:
res = _winapi.WaitForMultipleObjects(L, False, timeout)
if res == WAIT_TIMEOUT:
break
elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
res -= WAIT_OBJECT_0
elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
res -= WAIT_ABANDONED_0
else:
raise RuntimeError('Should not get here')
ready.append(L[res])
L = L[res+1:]
timeout = 0
return ready
_ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
def wait(object_list, timeout=None):
'''
Wait till an object in object_list is ready/readable.
Returns list of those objects in object_list which are ready/readable.
'''
if timeout is None:
timeout = INFINITE
elif timeout < 0:
timeout = 0
else:
timeout = int(timeout * 1000 + 0.5)
object_list = list(object_list)
waithandle_to_obj = {}
ov_list = []
ready_objects = set()
ready_handles = set()
try:
for o in object_list:
try:
fileno = getattr(o, 'fileno')
except AttributeError:
waithandle_to_obj[o.__index__()] = o
else:
# start an overlapped read of length zero
try:
ov, err = _winapi.ReadFile(fileno(), 0, True)
except OSError as e:
err = e.winerror
if err not in _ready_errors:
raise
if err == _winapi.ERROR_IO_PENDING:
ov_list.append(ov)
waithandle_to_obj[ov.event] = o
else:
# If o.fileno() is an overlapped pipe handle and
# err == 0 then there is a zero length message
# in the pipe, but it HAS NOT been consumed.
ready_objects.add(o)
timeout = 0
ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
finally:
# request that overlapped reads stop
for ov in ov_list:
ov.cancel()
# wait for all overlapped reads to stop
for ov in ov_list:
try:
_, err = ov.GetOverlappedResult(True)
except OSError as e:
err = e.winerror
if err not in _ready_errors:
raise
if err != _winapi.ERROR_OPERATION_ABORTED:
o = waithandle_to_obj[ov.event]
ready_objects.add(o)
if err == 0:
# If o.fileno() is an overlapped pipe handle then
# a zero length message HAS been consumed.
if hasattr(o, '_got_empty_message'):
o._got_empty_message = True
ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
return [o for o in object_list if o in ready_objects]
else:
import selectors
# poll/select have the advantage of not requiring any extra file
# descriptor, unlike epoll/kqueue (also, they require a single
# syscall).
if hasattr(selectors, 'PollSelector'):
_WaitSelector = selectors.PollSelector
else:
_WaitSelector = selectors.SelectSelector
def wait(object_list, timeout=None):
'''
Wait till an object in object_list is ready/readable.
Returns list of those objects in object_list which are ready/readable.
'''
with _WaitSelector() as selector:
for obj in object_list:
selector.register(obj, selectors.EVENT_READ)
if timeout is not None:
deadline = time.time() + timeout
while True:
ready = selector.select(timeout)
if ready:
return [key.fileobj for (key, events) in ready]
else:
if timeout is not None:
timeout = deadline - time.time()
if timeout < 0:
return ready
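# Typical use of wait(): multiplex several Connection objects without blocking
# on any single recv(), e.g. `for conn in wait([c1, c2], timeout=1.0): conn.recv()`.
# A connection whose peer has closed is also reported ready; recv() then raises EOFError.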
#
# Make connection and socket objects sharable if possible
#
if sys.platform == 'win32':
def reduce_connection(conn):
handle = conn.fileno()
with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
from . import resource_sharer
ds = resource_sharer.DupSocket(s)
return rebuild_connection, (ds, conn.readable, conn.writable)
def rebuild_connection(ds, readable, writable):
sock = ds.detach()
return Connection(sock.detach(), readable, writable)
reduction.register(Connection, reduce_connection)
def reduce_pipe_connection(conn):
access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
(_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
dh = reduction.DupHandle(conn.fileno(), access)
return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
def rebuild_pipe_connection(dh, readable, writable):
handle = dh.detach()
return PipeConnection(handle, readable, writable)
reduction.register(PipeConnection, reduce_pipe_connection)
else:
def reduce_connection(conn):
df = reduction.DupFd(conn.fileno())
return rebuild_connection, (df, conn.readable, conn.writable)
def rebuild_connection(df, readable, writable):
fd = df.detach()
return Connection(fd, readable, writable)
reduction.register(Connection, reduce_connection)
# and we'd better avoid the cost of concatenation.
chunks = [header, buf]
elif n > 0:
# Issue # 20540: concatenate before sending, to avoid delays due
# to Nagle's algorithm on a TCP socket.
chunks = [header + buf]
else:
# This code path is necessary to avoid "broken pipe" errors
# when sending a 0-length buffer if the other end closed the pipe.
chunks = [header]
for chunk in chunks:
self._send(chunk)
def _recv_bytes(self, maxsize=None):
buf = self._recv(4)
size, = struct.unpack("!i", buf.getvalue())
if maxsize is not None and size > maxsize:
return None
return self._recv(size)
def _poll(self, timeout):
r = wait([self], timeout)
return bool(r)
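# Illustrative wire-format sketch for the Connection framing above (hypothetical
# payload, not part of the module): _send_bytes prefixes every message with a
# 4-byte big-endian signed length, and _recv_bytes reads that header back first.
#
#     import struct
#     payload = b'hi'
#     frame = struct.pack("!i", len(payload)) + payload
#     assert frame == b'\x00\x00\x00\x02hi'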
#
# Public functions
#
class Listener(object):
'''
Returns a listener object.
This is a wrapper for a bound socket which is 'listening' for
connections, or for a Windows named pipe.
'''
def __init__(self, address=None, family=None, backlog=1, authkey=None):
family = family or (address and address_type(address)) \
or default_family
address = address or arbitrary_address(family)
_validate_family(family)
if family == 'AF_PIPE':
self._listener = PipeListener(address, backlog)
else:
self._listener = SocketListener(address, family, backlog)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')
self._authkey = authkey
def accept(self):
'''
Accept a connection on the bound socket or named pipe of `self`.
Returns a `Connection` object.
'''
if self._listener is None:
raise OSError('listener is closed')
c = self._listener.accept()
if self._authkey:
deliver_challenge(c, self._authkey)
answer_challenge(c, self._authkey)
return c
def close(self):
'''
Close the bound socket or named pipe of `self`.
'''
if self._listener is not None:
self._listener.close()
self._listener = None
address = property(lambda self: self._listener._address)
last_accepted = property(lambda self: self._listener._last_accepted)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def Client(address, family=None, authkey=None):
'''
Returns a connection to the address of a `Listener`
'''
family = family or address_type(address)
_validate_family(family)
if family == 'AF_PIPE':
c = PipeClient(address)
else:
c = SocketClient(address)
if authkey is not None and not isinstance(authkey, bytes):
raise TypeError('authkey should be a byte string')
if authkey is not None:
answer_challenge(c, authkey)
deliver_challenge(c, authkey)
return c
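# Minimal usage sketch for Listener/Client (hypothetical address and authkey):
# one side listens and receives, the other connects and sends.
#
#     # receiving side
#     with Listener(('localhost', 6000), authkey=b'secret') as listener:
#         with listener.accept() as conn:
#             print(conn.recv())
#
#     # sending side
#     with Client(('localhost', 6000), authkey=b'secret') as conn:
#         conn.send('hello')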
if sys.platform != 'win32':
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
if duplex:
s1, s2 = socket.socketpair()
s1.setblocking(True)
s2.setblocking(True)
c1 = Connection(s1.detach())
c2 = Connection(s2.detach())
else:
fd1, fd2 = os.pipe()
c1 = Connection(fd1, writable=False)
c2 = Connection(fd2, readable=False)
return c1, c2
else:
def Pipe(duplex=True):
'''
Returns pair of connection objects at either end of a pipe
'''
address = arbitrary_address('AF_PIPE')
if duplex:
openmode = _winapi.PIPE_ACCESS_DUPLEX
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
obsize, ibsize = BUFSIZE, BUFSIZE
else:
openmode = _winapi.PIPE_ACCESS_INBOUND
access = _winapi.GENERIC_WRITE
obsize, ibsize = 0, BUFSIZE
h1 = _winapi.CreateNamedPipe(
address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
_winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
# default security descriptor: the handle cannot be inherited
_winapi.NULL
)
h2 = _winapi.CreateFile(
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
_winapi.SetNamedPipeHandleState(
h2, _winapi.PIPE_READMODE_MESSAGE, None, None
)
overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
_, err = overlapped.GetOverlappedResult(True)
assert err == 0
c1 = PipeConnection(h1, writable=duplex)
c2 = PipeConnection(h2, readable=duplex)
return c1, c2
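# Usage sketch for Pipe() (hypothetical values): both branches return two
# connection objects; with duplex=True either end may send and receive, with
# duplex=False c1 is read-only and c2 is write-only.
#
#     c1, c2 = Pipe()
#     c2.send({'answer': 42})
#     assert c1.recv() == {'answer': 42}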
#
# Definitions for connections based on sockets
#
class SocketListener(object):
'''
Representation of a socket which is bound to an address and listening
'''
def __init__(self, address, family, backlog=1):
self._socket = socket.socket(getattr(socket, family))
try:
# SO_REUSEADDR has different semantics on Windows (issue #2550).
if os.name == 'posix':
self._socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self._socket.setblocking(True)
self._socket.bind(address)
self._socket.listen(backlog)
self._address = self._socket.getsockname()
except OSError:
self._socket.close()
raise
self._family = family
self._last_accepted = None
if family == 'AF_UNIX':
self._unlink = util.Finalize(
self, os.unlink, args=(address,), exitpriority=0
)
else:
self._unlink = None
def accept(self):
while True:
try:
s, self._last_accepted = self._socket.accept()
except InterruptedError:
pass
else:
break
s.setblocking(True)
return Connection(s.detach())
def close(self):
self._socket.close()
if self._unlink is not None:
self._unlink()
def SocketClient(address):
'''
Return a connection object connected to the socket given by `address`
'''
family = address_type(address)
with socket.socket( getattr(socket, family) ) as s:
s.setblocking(True)
s.connect(address)
return Connection(s.detach())
#
# Definitions for connections based on named pipes
#
if sys.platform == 'win32':
class PipeListener(object):
'''
Representation of a named pipe
'''
def __init__(self, address, backlog=None):
self._address = address
self._handle_queue = [self._new_handle(first=True)]
self._last_accepted = None
util.sub_debug('listener created with address=%r', self._address)
self.close = util.Finalize(
self, PipeListener._finalize_pipe_listener,
args=(self._handle_queue, self._address), exitpriority=0
)
def _new_handle(self, first=False):
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
if first:
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
return _winapi.CreateNamedPipe(
self._address, flags,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
_winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
)
def accept(self):
self._handle_queue.append(self._new_handle())
handle = self._handle_queue.pop(0)
try:
ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
except OSError as e:
if e.winerror != _winapi.ERROR_NO_DATA:
raise
# ERROR_NO_DATA can occur if a client has already connected,
# written data and then disconnected -- see Issue 14725.
else:
try:
res = _winapi.WaitForMultipleObjects(
[ov.event], False, INFINITE)
except:
ov.cancel()
_winapi.CloseHandle(handle)
raise
finally:
_, err = ov.GetOverlappedResult(True)
assert err == 0
return PipeConnection(handle)
@staticmethod
def _finalize_pipe_listener(queue, address):
util.sub_debug('closing listener with address=%r', address)
for handle in queue:
_winapi.CloseHandle(handle)
def PipeClient(address):
'''
Return a connection object connected to the pipe given by `address`
'''
t = _init_timeout()
while 1:
try:
_winapi.WaitNamedPipe(address, 1000)
h = _winapi.CreateFile(
address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
0, _winapi.NULL, _winapi.OPEN_EXISTING,
_winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
)
except OSError as e:
if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
_winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
raise
else:
break
else:
raise
_winapi.SetNamedPipeHandleState(
h, _winapi.PIPE_READMODE_MESSAGE, None, None
)
return PipeConnection(h)
#
# Authentication stuff
#
MESSAGE_LENGTH = 20
CHALLENGE = b'#CHALLENGE#'
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'
def deliver_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = os.urandom(MESSAGE_LENGTH)
connection.send_bytes(CHALLENGE + message)
digest = hmac.new(authkey, message, 'md5').digest()
response = connection.recv_bytes(256) # reject large message
if response == digest:
connection.send_bytes(WELCOME)
else:
connection.send_bytes(FAILURE)
raise AuthenticationError('digest received was wrong')
def answer_challenge(connection, authkey):
import hmac
assert isinstance(authkey, bytes)
message = connection.recv_bytes(256) # reject large message
assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
message = message[len(CHALLENGE):]
digest = hmac.new(authkey, message, 'md5').digest()
connection.send_bytes(digest)
response = connection.recv_bytes(256) # reject large message
if response != WELCOME:
raise AuthenticationError('digest sent was rejected')
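# Handshake sketch (derived from the two functions above; byte values are the
# module constants): Listener.accept() calls deliver_challenge() and then
# answer_challenge(), while Client() does the reverse, so each side proves it
# knows authkey.
#
#     listener -> client  : b'#CHALLENGE#' + 20 random bytes
#     client   -> listener: hmac_md5(authkey, random bytes)
#     listener -> client  : b'#WELCOME#' on a match, b'#FAILURE#' otherwise
#     (then the same exchange is repeated with the roles swapped)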
#
# Support for using xmlrpclib for serialization
#
class ConnectionWrapper(object):
def __init__(self, conn, dumps, loads):
self._conn = conn
self._dumps = dumps
self._loads = loads
for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
obj = getattr(conn, attr)
setattr(self, attr, obj)
def send(self, obj):
s = self._dumps(obj)
self._conn.send_bytes(s)
def recv(self):
s = self._conn.recv_bytes()
return self._loads(s)
def _xml_dumps(obj):
return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
def _xml_loads(s):
(obj,), method = xmlrpclib.loads(s.decode('utf-8'))
return obj
class XmlListener(Listener):
def accept(self):
global xmlrpclib
import xmlrpc.client as xmlrpclib
obj = Listener.accept(self)
return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
def XmlClient(*args, **kwds):
global xmlrpclib
import xmlrpc.client as xmlrpclib
return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
#
# Wait
#
if sys.platform == 'win32':
def _exhaustive_wait(handles, timeout):
# Return ALL handles which are currently signalled. (Only
# returning the first signalled might create starvation issues.)
L = list(handles)
ready = []
while L:
res = _winapi.WaitForMultipleObjects(L, False, timeout)
if res == WAIT_TIMEOUT:
break
elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
res -= WAIT_OBJECT_0
elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
res -= WAIT_ABANDONED_0
else:
raise RuntimeError('Should not get here')
ready.append(L[res])
L = L[res+1:]
timeout = 0
return ready
_ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
def wait(object_list, timeout=None):
'''
Wait till an object in object_list is ready/readable.
Returns list of those objects in object_list which are ready/readable.
'''
if timeout is None:
timeout = INFINITE
elif timeout < 0:
timeout = 0
else:
timeout = int(timeout * 1000 + 0.5)
object_list = list(object_list)
waithandle_to_obj = {}
ov_list = []
ready_objects = set()
ready_handles = set()
try:
for o in object_list:
try:
fileno = getattr(o, 'fileno')
except AttributeError:
waithandle_to_obj[o.__index__()] = o
else:
# start an overlapped read of length zero
try:
ov, err = _winapi.ReadFile(fileno(), 0, True)
except OSError as e:
err = e.winerror
if err not in _ready_errors:
raise
if err == _winapi.ERROR_IO_PENDING:
ov_list.append(ov)
waithandle_to_obj[ov.event] = o
else:
# If o.fileno() is an overlapped pipe handle and
# err == 0 then there is a zero length message
# in the pipe, but it HAS NOT been consumed.
ready_objects.add(o)
timeout = 0
ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
finally:
# request that overlapped reads stop
for ov in ov_list:
ov.cancel()
# wait for all overlapped reads to stop
for ov in ov_list:
try:
_, err = ov.GetOverlappedResult(True)
except OSError as e:
err = e.winerror
if err not in _ready_errors:
raise
if err != _winapi.ERROR_OPERATION_ABORTED:
o = waithandle_to_obj[ov.event]
ready_objects.add(o)
if err == 0:
# If o.fileno() is an overlapped pipe handle then
# a zero length message HAS been consumed.
if hasattr(o, '_got_empty_message'):
o._got_empty_message = True
ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
return [o for o in object_list if o in ready_objects]
else:
import selectors
    # poll/select have the advantage of not requiring any extra file
    # descriptor, unlike epoll/kqueue (also, they require only a single
    # syscall).
if hasattr(selectors, 'PollSelector'):
_WaitSelector = selectors.PollSelector
else:
_WaitSelector = selectors.SelectSelector
def wait(object_list, timeout=None):
'''
Wait till an object in object_list is ready/readable.
Returns list of those objects in object_list which are ready/readable.
'''
with _WaitSelector() as selector:
for obj in object_list:
selector.register(obj, selectors.EVENT_READ)
if timeout is not None:
deadline = time.time() + timeout
while True:
ready = selector.select(timeout)
if ready:
return [key.fileobj for (key, events) in ready]
else:
if timeout is not None:
timeout = deadline - time.time()
if timeout < 0:
return ready
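# Usage sketch for wait() (hypothetical connection list): multiplex several
# Connection objects without busy-polling each one.
#
#     while connections:
#         for conn in wait(connections, timeout=1.0):
#             try:
#                 item = conn.recv()
#             except EOFError:
#                 connections.remove(conn)
#             else:
#                 print(item)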
#
# Make connection and socket objects sharable if possible
#
if sys.platform == 'win32':
def reduce_connection(conn):
handle = conn.fileno()
with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
from . import resource_sharer
ds = resource_sharer.DupSocket(s)
return rebuild_connection, (ds, conn.readable, conn.writable)
def rebuild_connection(ds, readable, writable):
sock = ds.detach()
return Connection(sock.detach(), readable, writable)
reduction.register(Connection, reduce_connection)
def reduce_pipe_connection(conn):
access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
(_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
dh = reduction.DupHandle(conn.fileno(), access)
return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
def rebuild_pipe_connection(dh, readable, writable):
handle = dh.detach()
return PipeConnection(handle, readable, writable)
reduction.register(PipeConnection, reduce_pipe_connection)
else:
def reduce_connection(conn):
df = reduction.DupFd(conn.fileno())
return rebuild_connection, (df, conn.readable, conn.writable)
def rebuild_connection(df, readable, writable):
fd = df.detach()
return Connection(fd, readable, writable)
reduction.register(Connection, reduce_connection)
| mit | 484,566,150,979,678,700 | 31.022664 | 84 | 0.548524 | false |
supermarkion/Life-Backup | Python/CanIWin.py | 1 | 2531 | '''
In the "100 game," two players take turns adding, to a running total, any integer from 1..10. The player who first
causes the running total to reach or exceed 100 wins.
What if we change the game so that players cannot re-use integers?
For example, two players might take turns drawing from a common pool of numbers of 1..15 without replacement until
they reach a total >= 100.
Given an integer maxChoosableInteger and another integer desiredTotal, determine if the first player to move can
force a win, assuming both players play optimally.
You can always assume that maxChoosableInteger will not be larger than 20 and desiredTotal will not be larger than
300.
Link: https://leetcode.com/problems/can-i-win/#/description
Example:
Input:
maxChoosableInteger = 10
desiredTotal = 11
Output:
false
Explanation:
No matter which integer the first player chooses, the first player will lose.
The first player can choose an integer from 1 up to 10.
If the first player choose 1, the second player can only choose integers from 2 up to 10.
The second player will win by choosing 10 and get a total = 11, which is >= desiredTotal.
Same with other integers chosen by the first player, the second player will always win.
Solution: None
Source: None
'''
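# Quick sanity checks for the Solution class below (illustrative only; the
# expected values follow the problem statement quoted above):
#
#     s = Solution()
#     assert s.canIWin(10, 11) is False  # the worked example above
#     assert s.canIWin(10, 10) is True   # first player simply picks 10
#     assert s.canIWin(10, 0) is True    # desired total already reached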
class Solution(object):
def canIWin(self, maxChoosableInteger, desiredTotal):
"""
:type maxChoosableInteger: int
:type desiredTotal: int
:rtype: bool
"""
        # all integers still available to choose, kept as a tuple so the state is hashable for memoization
choosable = tuple(range(1, maxChoosableInteger + 1))
if sum(choosable) < desiredTotal:
return False
self.cache = {}
return self.dfs(choosable, desiredTotal)
def dfs(self, choosable, total):
        # if the largest remaining number is enough to reach the total, the current player takes it and wins
if choosable[-1] >= total:
return True
key = choosable
if key in self.cache:
            return self.cache[key]  # reuse the memoized result to save time
for i in range(len(choosable)):
            # if taking choosable[i] leaves the opponent with no winning move, the current player wins
if not self.dfs(choosable[:i] + choosable[i + 1:], total - choosable[i]):
self.cache[key] = True
return True
self.cache[key] = False
return False | mit | 4,426,303,549,233,985,000 | 35.695652 | 118 | 0.641644 | false |
devilry/devilry-django | devilry/devilry_account/migrations/0003_datamigrate-admins-into-permissiongroups.py | 1 | 2619 | # -*- coding: utf-8 -*-
from django.db import models, migrations
def datamigrate_admins_into_permissiongroups(apps, schema_editor):
permissiongroup_model = apps.get_model('devilry_account', 'PermissionGroup')
permissiongroupuser_model = apps.get_model('devilry_account', 'PermissionGroupUser')
subject_model = apps.get_model('core', 'Subject')
subjectpermissiongroup_model = apps.get_model('devilry_account', 'SubjectPermissionGroup')
period_model = apps.get_model('core', 'Period')
periodpermissiongroup_model = apps.get_model('devilry_account', 'PeriodPermissionGroup')
permissiongroupusers = []
subjectpermissiongroups = []
periodpermissiongroups = []
for subject in subject_model.objects.prefetch_related('admins'):
permissiongroup = permissiongroup_model(
name=subject.short_name,
is_custom_manageable=True,
grouptype='subjectadmin')
permissiongroup.save()
subjectpermissiongroups.append(
subjectpermissiongroup_model(
subject=subject,
permissiongroup=permissiongroup))
for adminuser in subject.admins.all():
permissiongroupusers.append(
permissiongroupuser_model(
permissiongroup=permissiongroup,
user=adminuser))
for period in period_model.objects\
.select_related('parentnode')\
.prefetch_related('admins'):
permissiongroup = permissiongroup_model(
name='{}.{}'.format(period.parentnode.short_name,
period.short_name),
is_custom_manageable=True,
grouptype='periodadmin')
permissiongroup.save()
periodpermissiongroups.append(
periodpermissiongroup_model(
period=period,
permissiongroup=permissiongroup))
for adminuser in period.admins.all():
permissiongroupusers.append(
permissiongroupuser_model(
permissiongroup=permissiongroup,
user=adminuser))
permissiongroupuser_model.objects.bulk_create(permissiongroupusers)
subjectpermissiongroup_model.objects.bulk_create(subjectpermissiongroups)
periodpermissiongroup_model.objects.bulk_create(periodpermissiongroups)
class Migration(migrations.Migration):
dependencies = [
('devilry_account', '0002_auto_20150917_1731'),
('core', '0003_auto_20150917_1537'),
]
operations = [
migrations.RunPython(datamigrate_admins_into_permissiongroups)
]
| bsd-3-clause | 4,552,534,692,730,217,500 | 36.414286 | 94 | 0.649866 | false |
tejal29/pants | src/python/pants/backend/core/wrapped_globs.py | 1 | 3765 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from six import string_types
from twitter.common.dirutil.fileset import Fileset
from pants.base.build_environment import get_buildroot
class FilesetRelPathWrapper(object):
def __init__(self, parse_context):
self.rel_path = parse_context.rel_path
def __call__(self, *args, **kwargs):
root = os.path.join(get_buildroot(), self.rel_path)
excludes = kwargs.pop('exclude', [])
if isinstance(excludes, string_types):
raise ValueError("Expected exclude parameter to be a list of globs, lists, or strings")
for i, exclude in enumerate(excludes):
if isinstance(exclude, string_types):
# You can't subtract raw strings from globs
excludes[i] = [exclude]
for glob in args:
      if self._is_glob_dir_outside_root(glob, root):
raise ValueError('Invalid glob %s, points outside BUILD file root dir %s' % (glob, root))
result = self.wrapped_fn(root=root, *args, **kwargs)
for exclude in excludes:
result -= exclude
return result
def _is_glob_dir_outside_root(self, glob, root):
# The assumption is that a correct glob starts with the root,
# even after normalizing.
glob_path = os.path.normpath(os.path.join(root, glob))
# Check if the glob path has the correct root.
return os.path.commonprefix([root, glob_path]) != root
class Globs(FilesetRelPathWrapper):
"""Returns Fileset containing matching files in same directory as this BUILD file.
  E.g., ``sources = globs('*.java'),`` to get .java files in this directory.
:param exclude: a list of {,r,z}globs objects, strings, or lists of
strings to exclude. E.g. ``globs('*',exclude=[globs('*.java'),
'foo.py'])`` gives all files in this directory except ``.java``
files and ``foo.py``.
Deprecated:
You might see that old code uses "math" on the return value of
``globs()``. E.g., ``globs('*') - globs('*.java')`` gives all files
in this directory *except* ``.java`` files. Please use exclude
instead, since pants is moving to make BUILD files easier to parse,
and the new grammar will not support arithmetic.
:returns Fileset containing matching files in same directory as this BUILD file.
:rtype Fileset
"""
wrapped_fn = Fileset.globs
class RGlobs(FilesetRelPathWrapper):
"""Recursive ``globs``, returns Fileset matching files in this directory and its descendents.
E.g., ``bundle().add(rglobs('config/*')),`` to bundle up all files in
the config, config/foo, config/foo/bar directories.
:param exclude: a list of {,r,z}globs objects, strings, or lists of
strings to exclude. E.g. ``rglobs('config/*',exclude=[globs('config/*.java'),
'config/foo.py'])`` gives all files under config except ``.java`` files and ``config/foo.py``.
Deprecated:
You might see that old code uses "math" on the return value of ``rglobs()``. E.g.,
``rglobs('config/*') - rglobs('config/foo/*')`` gives all files under `config` *except*
those in ``config/foo``. Please use exclude instead, since pants is moving to
make BUILD files easier to parse, and the new grammar will not support arithmetic.
:returns Fileset matching files in this directory and its descendents.
:rtype Fileset
"""
wrapped_fn = Fileset.rglobs
class ZGlobs(FilesetRelPathWrapper):
"""Returns a Fileset that matches zsh-style globs, including ``**/`` for recursive globbing.
Uses ``BUILD`` file's directory as the "working directory".
"""
wrapped_fn = Fileset.zglobs
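# Illustrative BUILD-file usage of the wrappers above (target name and patterns
# are made up): zglobs supports zsh-style '**' recursion and the same `exclude`
# keyword handled in FilesetRelPathWrapper.__call__.
#
#   python_library(
#     name='example',
#     sources=zglobs('**/*.py', exclude=['conftest.py']),
#   )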
| apache-2.0 | -2,163,516,751,647,764,200 | 36.65 | 97 | 0.692696 | false |
UltronAI/Deep-Learning | CS231n/reference/CS231n-master/assignment2/cs231n/layer_utils.py | 1 | 2538 | from cs231n.layers import *
from cs231n.fast_layers import *
def affine_relu_forward(x, w, b):
"""
Convenience layer that perorms an affine transform followed by a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
out, relu_cache = relu_forward(a)
cache = (fc_cache, relu_cache)
return out, cache
def affine_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
fc_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db
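# Shape sketch for the affine-relu pair above (sizes are made up): a batch of
# N=2 inputs with D=4 features mapped to M=3 outputs.
#
#     import numpy as np
#     x = np.random.randn(2, 4)
#     w = np.random.randn(4, 3)
#     b = np.zeros(3)
#     out, cache = affine_relu_forward(x, w, b)    # out.shape == (2, 3)
#     dx, dw, db = affine_relu_backward(np.ones_like(out), cache)
#     # dx.shape == (2, 4), dw.shape == (4, 3), db.shape == (3,)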
def conv_relu_forward(x, w, b, conv_param):
"""
A convenience layer that performs a convolution followed by a ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
out, relu_cache = relu_forward(a)
cache = (conv_cache, relu_cache)
return out, cache
def conv_relu_backward(dout, cache):
"""
Backward pass for the conv-relu convenience layer.
"""
conv_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
s, relu_cache = relu_forward(a)
out, pool_cache = max_pool_forward_fast(s, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
def conv_relu_pool_backward(dout, cache):
"""
Backward pass for the conv-relu-pool convenience layer
"""
conv_cache, relu_cache, pool_cache = cache
ds = max_pool_backward_fast(dout, pool_cache)
da = relu_backward(ds, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
| mit | 6,864,787,935,838,201,000 | 25.290323 | 72 | 0.657998 | false |
OpenAT/cu_eura | eura_config/__openerp__.py | 1 | 1681 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'eura_config',
'summary': """FCOM Customer eura Modifications""",
'description': """
FCOM Customer Modifications
===========================
Use this Addon as a Base for all Customer specific Modifications containing:
- Default Settings (User Defaults, Accounts, Taxes, Project Stages, ...)
- View Modifications
- CSS Modifications (sass in scss format)
- Translations (i18n, pot, po)
""",
'author': 'Michael Karrer ([email protected]), DataDialog',
'version': '1.0',
'website': 'https://www.datadialog.net',
'installable': False,
'depends': [
'base_config',
'website_sale',
'website_sale_delivery',
'website_sale_donate',
'website_sale_categories',
],
'data': [
'views/templates.xml',
],
}
| agpl-3.0 | -4,298,214,358,802,706,000 | 34.020833 | 78 | 0.595479 | false |
ifarup/colourlab | colourlab/space.py | 1 | 67700 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
space: Colour spaces, part of the colourlab package
Copyright (C) 2013-2016 Ivar Farup
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from . import misc
# =============================================================================
# Colour space classes
#
# Throughout the code, the name ndata is used for numerical data (numpy
# arrays), and data is used for objects of the type Points.
# =============================================================================
class Space(object):
"""
Base class for the colour space classes.
"""
# White points in XYZ
white_A = np.array([1.0985, 1., 0.35585])
white_B = np.array([.990720, 1., .852230])
white_C = np.array([.980740, 1., .82320])
white_D50 = np.array([.964220, 1., .825210])
white_D55 = np.array([.956820, 1., .921490])
white_D65 = np.array([.950470, 1., 1.088830])
white_D75 = np.array([.949720, 1., 1.226380])
white_E = np.array([1., 1., 1.])
white_F2 = np.array([.991860, 1., .673930])
white_F7 = np.array([.950410, 1., 1.087470])
white_F11 = np.array([1.009620, 1., .643500])
def empty_matrix(self, ndata):
"""
        Return a list of empty (zero) matrices suitable for Jacobians etc.
Parameters
----------
ndata : ndarray
List of colour data.
Returns
-------
empty_matrix : ndarray
List of empty matrices of dimensions corresponding to ndata.
"""
return np.zeros((np.shape(ndata)[0], 3, 3))
def jacobian_XYZ(self, data):
"""
Return the Jacobian to XYZ, dx^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class) by inverting the inverse Jacobian.
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to XYZ.
"""
return np.linalg.inv(self.inv_jacobian_XYZ(data))
def inv_jacobian_XYZ(self, data):
"""
Return the inverse Jacobian to XYZ, dXYZ^i/dx^j.
The inverse Jacobian is calculated at the given data points
(of the Points class) by inverting the Jacobian.
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians from XYZ.
"""
return np.linalg.inv(self.jacobian_XYZ(data))
def vectors_to_XYZ(self, points_data, vectors_ndata):
"""
        Convert vector data to the XYZ colour space.
Parameters
----------
points_data : Points
The colour data points.
vectors_ndata : ndarray
            Array of colour vectors in the current colour space.
Returns
-------
xyz_vectors : ndarray
Array of colour vectors in XYZ.
"""
jacobian = self.inv_jacobian_XYZ(points_data)
return np.einsum('...ij,...j->...i', jacobian, vectors_ndata)
def vectors_from_XYZ(self, points_data, vectors_ndata):
"""
        Convert vector data from the XYZ colour space.
Parameters
----------
points_data : Points
The colour data points.
vectors_ndata : ndarray
            Array of colour vectors in XYZ.
Returns
-------
vectors : ndarray
Array of colour vectors in the current colour space.
"""
jacobian = self.jacobian_XYZ(points_data)
return np.einsum('...ij,...j->...i', jacobian, vectors_ndata)
def metrics_to_XYZ(self, points_data, metrics_ndata):
"""
Convert metric data to the XYZ colour space.
Parameters
----------
points_data : Points
The colour data points.
metrics_ndata : ndarray
Array of colour metric tensors in current colour space.
Returns
-------
xyz_metrics : ndarray
Array of colour metric tensors in XYZ.
"""
jacobian = self.jacobian_XYZ(points_data)
return np.einsum('...ij,...ik,...kl->...jl', jacobian,
metrics_ndata, jacobian)
def metrics_from_XYZ(self, points_data, metrics_ndata):
"""
Convert metric data from the XYZ colour space.
Parameters
----------
points_data : Points
The colour data points.
metrics_ndata : ndarray
Array of colour metric tensors in XYZ.
Returns
-------
metrics : ndarray
Array of colour metric tensors in the current colour space.
"""
jacobian = self.inv_jacobian_XYZ(points_data)
return np.einsum('...ij,...ik,...kl->...jl', jacobian,
metrics_ndata, jacobian)
class XYZ(Space):
"""
The XYZ colour space.
Assumes that the CIE 1931 XYZ colour matching functions are
    used. The white point is D65. XYZ plays a special role in the code,
    serving as the common reference point for all other colour spaces.
"""
def to_XYZ(self, ndata):
"""
Convert from current colour space to XYZ.
Parameters
----------
ndata : ndarray
Colour data in the current colour space.
Returns
-------
xyz : ndarray
Colour data in the XYZ colour space.
"""
return ndata.copy() # identity transform
def from_XYZ(self, ndata):
"""
Convert from XYZ to current colour space.
Parameters
----------
ndata : ndarray
Colour data in the XYZ colour space.
Returns
-------
xyz : ndarray
Colour data in the current colour space.
"""
return ndata.copy() # identity transform
def jacobian_XYZ(self, data):
"""
Return the Jacobian to XYZ, dx^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to XYZ.
"""
jac = self.empty_matrix(data.flattened_XYZ)
jac[:] = np.eye(3)
return jac
def inv_jacobian_XYZ(self, data):
"""
Return the inverse Jacobian to XYZ, dXYZ^i/dx^j.
The inverse Jacobian is calculated at the given data points
(of the Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians from XYZ.
"""
ijac = self.empty_matrix(data.flattened_XYZ)
ijac[:] = np.eye(3)
return ijac
class Transform(Space):
"""
Base class for colour space transforms.
Real transforms (children) must implement to_base, from_base and either
jacobian_base or inv_jacobian_base.
"""
def __init__(self, base):
"""
Construct instance and set base space for transformation.
Parameters
----------
base : Space
The base for the colour space transform.
"""
self.base = base
def to_XYZ(self, ndata):
"""
Transform data to XYZ by using the transformation to the base.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
xyz : ndarray
Colour data in the XYZ colour space
"""
return self.base.to_XYZ(self.to_base(ndata))
def from_XYZ(self, ndata):
"""
Transform data from XYZ using the transformation to the base.
Parameters
----------
ndata : ndarray
Colour data in the XYZ colour space.
Returns
-------
xyz : ndarray
Colour data in the current colour space.
"""
return self.from_base(self.base.from_XYZ(ndata))
def jacobian_base(self, data):
"""
Return the Jacobian to base, dx^i/dbase^j.
The Jacobian is calculated at the given data points (of the
Points class) by inverting the inverse Jacobian.
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
return np.linalg.inv(self.inv_jacobian_base(data))
def inv_jacobian_base(self, data):
"""
Return the inverse Jacobian to base, dbase^i/dx^j.
The inverse Jacobian is calculated at the given data points
(of the Points class) by inverting the Jacobian.
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians from the base colour space.
"""
return np.linalg.inv(self.jacobian_base(data))
def jacobian_XYZ(self, data):
"""
Return the Jacobian to XYZ, dx^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class) using the jacobian to the base and the Jacobian
of the base space.
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to XYZ.
"""
dxdbase = self.jacobian_base(data)
dbasedXYZ = self.base.jacobian_XYZ(data)
return np.einsum('...ij,...jk->...ik', dxdbase, dbasedXYZ)
def inv_jacobian_XYZ(self, data):
"""
Return the inverse Jacobian to XYZ, dXYZ^i/dx^j.
The Jacobian is calculated at the given data points (of the
Points class) using the inverse jacobian to the base and the
inverse Jacobian of the base space.
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians from XYZ.
"""
dXYZdbase = self.base.inv_jacobian_XYZ(data)
dbasedx = self.inv_jacobian_base(data)
return np.einsum('...ij,...jk->...ik', dXYZdbase, dbasedx)
class TransformxyY(Transform):
"""
The XYZ to xyY projective transform.
"""
def __init__(self, base):
"""
Construct instance.
Parameters
----------
base : Space
Base colour space.
"""
super(TransformxyY, self).__init__(base)
def to_base(self, ndata):
"""
Convert from xyY to XYZ.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
xyz = np.zeros(np.shape(ndata))
xyz[:, 0] = ndata[:, 0]*ndata[:, 2] / ndata[:, 1]
xyz[:, 1] = ndata[:, 2]
xyz[:, 2] = (1 - ndata[:, 0] - ndata[:, 1]) * ndata[:, 2] / ndata[:, 1]
return xyz
def from_base(self, ndata):
"""
Convert from XYZ to xyY.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
xyz = ndata
xyY = np.zeros(np.shape(xyz))
xyz_sum = np.sum(xyz, axis=1)
xyY[:, 0] = xyz[:, 0] / xyz_sum # x
xyY[:, 1] = xyz[:, 1] / xyz_sum # y
xyY[:, 2] = xyz[:, 1] # Y
return xyY
def jacobian_base(self, data):
"""
Return the Jacobian to XYZ, dxyY^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
xyzdata = data.get_flattened(self.base)
jac = self.empty_matrix(xyzdata)
for i in range(np.shape(jac)[0]):
jac[i, 0, 0] = (xyzdata[i, 1] + xyzdata[i, 2]) / \
(xyzdata[i, 0] + xyzdata[i, 1] + xyzdata[i, 2]) ** 2
jac[i, 0, 1] = -xyzdata[i, 0] / \
(xyzdata[i, 0] + xyzdata[i, 1] + xyzdata[i, 2]) ** 2
jac[i, 0, 2] = -xyzdata[i, 0] / \
(xyzdata[i, 0] + xyzdata[i, 1] + xyzdata[i, 2]) ** 2
jac[i, 1, 0] = -xyzdata[i, 1] / \
(xyzdata[i, 0] + xyzdata[i, 1] + xyzdata[i, 2]) ** 2
jac[i, 1, 1] = (xyzdata[i, 0] + xyzdata[i, 2]) / \
(xyzdata[i, 0] + xyzdata[i, 1] + xyzdata[i, 2]) ** 2
jac[i, 1, 2] = -xyzdata[i, 1] / \
(xyzdata[i, 0] + xyzdata[i, 1] + xyzdata[i, 2]) ** 2
jac[i, 2, 1] = 1
return jac
def inv_jacobian_base(self, data):
"""
Return the Jacobian from XYZ, dXYZ^i/dxyY^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
xyYdata = data.get_flattened(self)
jac = self.empty_matrix(xyYdata)
for i in range(np.shape(jac)[0]):
jac[i, 0, 0] = xyYdata[i, 2] / xyYdata[i, 1]
jac[i, 0, 1] = - xyYdata[i, 0] * xyYdata[i, 2] / xyYdata[i, 1] ** 2
jac[i, 0, 2] = xyYdata[i, 0] / xyYdata[i, 1]
jac[i, 1, 2] = 1
jac[i, 2, 0] = - xyYdata[i, 2] / xyYdata[i, 1]
jac[i, 2, 1] = xyYdata[i, 2] * (xyYdata[i, 0] - 1) / \
xyYdata[i, 1] ** 2
jac[i, 2, 2] = (1 - xyYdata[i, 0] - xyYdata[i, 1]) / xyYdata[i, 1]
return jac
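# Example (illustrative sketch): the transforms operate directly on N x 3
# numpy arrays through from_base/to_base. The sample XYZ values below are
# arbitrary.
def _example_xyY_roundtrip():
    xyY_space = TransformxyY(XYZ())
    XYZ_vals = np.array([[0.20, 0.30, 0.25],
                         [0.41, 0.35, 0.20]])
    xyY_vals = xyY_space.from_base(XYZ_vals)   # XYZ -> xyY
    back = xyY_space.to_base(xyY_vals)         # xyY -> XYZ
    return xyY_vals, np.allclose(XYZ_vals, back)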
class TransformProjective(Transform):
"""
General projective transform of the XYZ to xyY type.
"""
def __init__(self, base, M=np.array([[1, 0, 0],
[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])):
"""
Construct instance.
Parameters
----------
base : Space
Base colour space.
M : ndarray
4 x 3 array with the coefficients of the projective transforms,
such that
x = (M[0, 0] * X + M[0, 1] * Y + M[0, 2] * Z) / denom
y = (M[1, 0] * X + M[1, 1] * Y + M[1, 2] * Z) / denom
denom = M[2, 0] * X + M[2, 1] * Y + M[2, 2] * Z
Y = M[3, 0] * X + M[3, 1] * Y + M[3, 2] * Z
"""
self.M = M
super(TransformProjective, self).__init__(base)
def to_base(self, ndata):
"""
Convert from xyY to XYZ.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
xyz = np.zeros(np.shape(ndata))
x = ndata[:, 0]
y = ndata[:, 1]
Y = ndata[:, 2]
a = self.M[0, 0]
b = self.M[0, 1]
c = self.M[0, 2]
d = self.M[1, 0]
e = self.M[1, 1]
f = self.M[1, 2]
g = self.M[2, 0]
h = self.M[2, 1]
i = self.M[2, 2]
j = self.M[3, 0]
k = self.M[3, 1]
ell = self.M[3, 2]
for n in range(xyz.shape[0]):
A = np.array([[g * x[n] - a, h * x[n] - b, i * x[n] - c],
[g * y[n] - d, h * y[n] - e, i * y[n] - f],
[j, k, ell]])
B = np.array([0, 0, Y[n]])
XYZ = np.dot(np.linalg.inv(A), B)
xyz[n, 0] = XYZ[0]
xyz[n, 1] = XYZ[1]
xyz[n, 2] = XYZ[2]
return xyz
def from_base(self, ndata):
"""
Convert from XYZ to xyY.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
X = ndata[:, 0]
Y = ndata[:, 1]
Z = ndata[:, 2]
denom = self.M[2, 0] * X + self.M[2, 1] * Y + self.M[2, 2] * Z
xyY = np.zeros(np.shape(ndata))
xyY[:, 0] = (self.M[0, 0] * X +
self.M[0, 1] * Y +
self.M[0, 2] * Z) / denom # x
xyY[:, 1] = (self.M[1, 0] * X +
self.M[1, 1] * Y +
self.M[1, 2] * Z) / denom # y
xyY[:, 2] = (self.M[3, 0] * X +
self.M[3, 1] * Y +
self.M[3, 2] * Z) # Y
return xyY
def jacobian_base(self, data):
"""
Return the Jacobian to XYZ, dxyY^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
a = self.M[0, 0]
b = self.M[0, 1]
c = self.M[0, 2]
d = self.M[1, 0]
e = self.M[1, 1]
f = self.M[1, 2]
g = self.M[2, 0]
h = self.M[2, 1]
i = self.M[2, 2]
j = self.M[3, 0]
k = self.M[3, 1]
ell = self.M[3, 2]
xyzdata = data.get_flattened(self.base)
X = xyzdata[:, 0]
Y = xyzdata[:, 1]
Z = xyzdata[:, 2]
ABC = a * X + b * Y + c * Z
DEF = d * X + e * Y + f * Z
GHI = g * X + h * Y + i * Z
jac = self.empty_matrix(xyzdata)
jac[:, 0, 0] = (a * GHI - g * ABC) / GHI**2
jac[:, 0, 1] = (b * GHI - h * ABC) / GHI**2
jac[:, 0, 2] = (c * GHI - i * ABC) / GHI**2
jac[:, 1, 0] = (d * GHI - g * DEF) / GHI**2
jac[:, 1, 1] = (e * GHI - h * DEF) / GHI**2
jac[:, 1, 2] = (f * GHI - i * DEF) / GHI**2
jac[:, 2, 0] = j
jac[:, 2, 1] = k
jac[:, 2, 2] = ell
return jac
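# Example (illustrative sketch): with the default matrix M, TransformProjective
# reproduces the xyY projection above; the sample values are arbitrary.
def _example_projective_default_matches_xyY():
    proj = TransformProjective(XYZ())
    plain = TransformxyY(XYZ())
    XYZ_vals = np.array([[0.25, 0.40, 0.10]])
    return np.allclose(proj.from_base(XYZ_vals), plain.from_base(XYZ_vals))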
class TransformCIELAB(Transform):
"""
The XYZ to CIELAB colour space transform.
The white point is a parameter in the transform.
"""
kappa = 24389. / 27. # standard: 903.3
epsilon = 216. / 24389. # standard: 0.008856
def __init__(self, base, white_point=Space.white_D65):
"""
Construct instance by setting base space and white point.
Parameters
----------
base : Space
The base colour space.
white_point : ndarray or Points
The white point
"""
super(TransformCIELAB, self).__init__(base)
if not isinstance(white_point, np.ndarray):
self.white_point = white_point.get(xyz)
else:
self.white_point = white_point
def f(self, ndata):
"""
Auxiliary function for the conversion.
"""
fx = (self.kappa * ndata + 16.) / 116.
fx[ndata > self.epsilon] = ndata[ndata > self.epsilon] ** (1. / 3)
return fx
def dfdx(self, ndata):
"""
Auxiliary function for the Jacobian.
Returns the derivative of the function f above. Works for arrays.
"""
df = self.kappa / 116. * np.ones(np.shape(ndata))
df[ndata > self.epsilon] = \
(ndata[ndata > self.epsilon] ** (-2. / 3)) / 3
return df
def to_base(self, ndata):
"""
Convert from CIELAB to XYZ (base).
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
fy = (ndata[:, 0] + 16.) / 116.
fx = ndata[:, 1] / 500. + fy
fz = fy - ndata[:, 2] / 200.
xr = fx ** 3
xr[xr <= self.epsilon] = ((116 * fx[xr <= self.epsilon] - 16) /
self.kappa)
yr = fy ** 3
yr[ndata[:, 0] <= self.kappa * self.epsilon] = \
ndata[ndata[:, 0] <= self.kappa * self.epsilon, 0] / self.kappa
zr = fz ** 3
zr[zr <= self.epsilon] = ((116 * fz[zr <= self.epsilon] - 16) /
self.kappa)
xyz = np.zeros(np.shape(ndata))
xyz[:, 0] = xr * self.white_point[0]
xyz[:, 1] = yr * self.white_point[1]
xyz[:, 2] = zr * self.white_point[2]
return xyz
def from_base(self, ndata):
"""
Convert from XYZ (base) to CIELAB.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
lab = np.zeros(np.shape(ndata))
fx = self.f(ndata[:, 0] / self.white_point[0])
fy = self.f(ndata[:, 1] / self.white_point[1])
fz = self.f(ndata[:, 2] / self.white_point[2])
lab[:, 0] = 116. * fy - 16.
lab[:, 1] = 500. * (fx - fy)
lab[:, 2] = 200. * (fy - fz)
return lab
def jacobian_base(self, data):
"""
Return the Jacobian to XYZ (base), dCIELAB^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
d = data.get_flattened(self.base)
dr = d.copy()
for i in range(3):
dr[:, i] = dr[:, i] / self.white_point[i]
df = self.dfdx(dr)
jac = self.empty_matrix(d)
jac[:, 0, 1] = 116 * df[:, 1] / self.white_point[1] # dL/dY
jac[:, 1, 0] = 500 * df[:, 0] / self.white_point[0] # da/dX
jac[:, 1, 1] = -500 * df[:, 1] / self.white_point[1] # da/dY
jac[:, 2, 1] = 200 * df[:, 1] / self.white_point[1] # db/dY
jac[:, 2, 2] = -200 * df[:, 2] / self.white_point[2] # db/dZ
return jac
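# Example (illustrative sketch, assuming the default D65 white point): a simple
# XYZ -> CIELAB -> XYZ round trip on arbitrary sample colours.
def _example_cielab_roundtrip():
    lab_space = TransformCIELAB(XYZ())
    XYZ_vals = np.array([[0.4124, 0.2126, 0.0193],   # roughly the sRGB red primary
                         [0.20, 0.30, 0.40]])
    lab = lab_space.from_base(XYZ_vals)
    back = lab_space.to_base(lab)
    return lab, np.allclose(XYZ_vals, back)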
class TransformCIELUV(Transform):
"""
The XYZ to CIELUV colour space transform.
The white point is a parameter in the transform.
"""
kappa = 24389. / 27. # standard: 903.3
epsilon = 216. / 24389. # standard: 0.008856
def __init__(self, base, white_point=Space.white_D65):
"""
Construct instance by setting base space and white point.
Parameters
----------
base : Space
The base colour space.
white_point : ndarray or Points
The white point
"""
super(TransformCIELUV, self).__init__(base)
if not isinstance(white_point, np.ndarray):
self.white_point = white_point.get(xyz)
else:
self.white_point = white_point
def f(self, ndata):
"""
Auxiliary function for the conversion.
"""
fx = (self.kappa * ndata + 16.) / 116.
fx[ndata > self.epsilon] = ndata[ndata > self.epsilon] ** (1. / 3)
return fx
def dfdx(self, ndata):
"""
Auxiliary function for the Jacobian.
Returns the derivative of the function f above. Works for arrays.
"""
df = self.kappa / 116. * np.ones(np.shape(ndata))
df[ndata > self.epsilon] = \
(ndata[ndata > self.epsilon] ** (-2. / 3)) / 3
return df
def to_base(self, ndata):
"""
Convert from CIELUV to XYZ (base).
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
luv = ndata
fy = (luv[:, 0] + 16.) / 116.
y = fy ** 3
y[luv[:, 0] <= self.kappa * self.epsilon] = \
luv[luv[:, 0] <= self.kappa * self.epsilon, 0] / self.kappa
upr = 4 * self.white_point[0] / (self.white_point[0] +
15*self.white_point[1] +
3*self.white_point[2])
vpr = 9 * self.white_point[1] / (self.white_point[0] +
15*self.white_point[1] +
3*self.white_point[2])
a = (52*luv[:, 0] / (luv[:, 1] + 13*luv[:, 0]*upr) - 1) / 3
b = -5 * y
c = -1/3.
d = y * (39*luv[:, 0] / (luv[:, 2] + 13*luv[:, 0]*vpr) - 5)
x = (d - b) / (a - c)
z = x * a + b
# Combine into matrix
xyz = np.zeros(np.shape(luv))
xyz[:, 0] = x
xyz[:, 1] = y
xyz[:, 2] = z
return xyz
def from_base(self, ndata):
"""
Convert from XYZ (base) to CIELUV.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
d = ndata
luv = np.zeros(np.shape(d))
fy = self.f(d[:, 1] / self.white_point[1])
up = 4 * d[:, 0] / (d[:, 0] + 15*d[:, 1] + 3*d[:, 2])
upr = 4 * self.white_point[0] / (self.white_point[0] +
15*self.white_point[1] +
3*self.white_point[2])
vp = 9 * d[:, 1] / (d[:, 0] + 15*d[:, 1] + 3*d[:, 2])
vpr = 9 * self.white_point[1] / (self.white_point[0] +
15*self.white_point[1] +
3*self.white_point[2])
luv[:, 0] = 116. * fy - 16.
luv[:, 1] = 13 * luv[:, 0] * (up - upr)
luv[:, 2] = 13 * luv[:, 0] * (vp - vpr)
return luv
def jacobian_base(self, data):
"""
Return the Jacobian to XYZ (base), dCIELUV^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
xyz_ = data.get_flattened(xyz)
luv = data.get_flattened(cieluv)
df = self.dfdx(xyz_)
jac = self.empty_matrix(xyz_)
# dL/dY:
jac[:, 0, 1] = 116 * df[:, 1] / self.white_point[1]
# du/dX:
jac[:, 1, 0] = 13 * luv[:, 0] * \
(60 * xyz_[:, 1] + 12 * xyz_[:, 2]) / \
(xyz_[:, 0] + 15 * xyz_[:, 1] + 3 * xyz_[:, 2]) ** 2
# du/dY:
jac[:, 1, 1] = 13 * luv[:, 0] * \
-60 * xyz_[:, 0] / \
(xyz_[:, 0] + 15 * xyz_[:, 1] + 3 * xyz_[:, 2]) ** 2 + \
13 * jac[:, 0, 1] * (
4 * xyz_[:, 0] / (xyz_[:, 0] + 15 * xyz_[:, 1] +
3 * xyz_[:, 2]) -
4 * self.white_point[0] /
(self.white_point[0] + 15 * self.white_point[1] +
3 * self.white_point[2]))
# du/dZ:
jac[:, 1, 2] = 13 * luv[:, 0] * \
-12 * xyz_[:, 0] / \
(xyz_[:, 0] + 15 * xyz_[:, 1] + 3 * xyz_[:, 2]) ** 2
# dv/dX:
jac[:, 2, 0] = 13 * luv[:, 0] * \
-9 * xyz_[:, 1] / \
(xyz_[:, 0] + 15 * xyz_[:, 1] + 3 * xyz_[:, 2]) ** 2
# dv/dY:
jac[:, 2, 1] = 13 * luv[:, 0] * \
(9 * xyz_[:, 0] + 27 * xyz_[:, 2]) / \
(xyz_[:, 0] + 15 * xyz_[:, 1] + 3 * xyz_[:, 2]) ** 2 + \
13 * jac[:, 0, 1] * (
9 * xyz_[:, 1] / (xyz_[:, 0] + 15 * xyz_[:, 1] +
3 * xyz_[:, 2]) - 9 * self.white_point[1] /
(self.white_point[0] + 15 * self.white_point[1] +
3 * self.white_point[2]))
# dv/dZ:
jac[:, 2, 2] = 13 * luv[:, 0] * \
-27 * xyz_[:, 1] / \
(xyz_[:, 0] + 15 * xyz_[:, 1] + 3 * xyz_[:, 2]) ** 2
return jac
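# Example (illustrative sketch, assuming the default D65 white point): CIELUV
# shares the lightness function of CIELAB but encodes chromaticity through
# u', v' differences; the sample colour is arbitrary.
def _example_cieluv_roundtrip():
    luv_space = TransformCIELUV(XYZ())
    XYZ_vals = np.array([[0.30, 0.40, 0.20]])
    luv = luv_space.from_base(XYZ_vals)
    back = luv_space.to_base(luv)
    return luv, np.allclose(XYZ_vals, back)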
class TransformCIEDE00(Transform):
"""
The CIELAB to CIEDE00 L'a'b' colour space transform.
"""
def __init__(self, base):
"""
Construct instance by setting base space.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformCIEDE00, self).__init__(base)
def to_base(self, ndata):
"""
Convert from CIEDE00 to CIELAB (base).
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
raise RuntimeError('No conversion of CIEDE00 Lab to CIELAB ' +
'implemented (yet).')
def from_base(self, ndata):
"""
Convert from CIELAB (base) to CIEDE00.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
labp : ndarray
Colour data in the CIEDE00 L'a'b' colour space.
"""
lab = ndata
labp = lab.copy()
Cab = np.sqrt(lab[:, 1]**2 + lab[:, 2]**2)
G = .5 * (1 - np.sqrt(Cab**7 / (Cab**7 + 25**7)))
labp[:, 1] = lab[:, 1] * (1 + G)
return labp
def jacobian_base(self, data):
"""
Return the Jacobian to CIELAB (base), dCIEDE00^i/dCIELAB^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
lab = data.get_flattened(cielab)
lch = data.get_flattened(cielch)
a = lab[:, 1]
b = lab[:, 2]
C = lch[:, 1]
G = .5 * (1 - np.sqrt(C**7 / (C**7 + 25**7)))
jac = self.empty_matrix(lab)
jac[:, 0, 0] = 1 # dLp/dL
jac[:, 2, 2] = 1 # dbp/db
# jac[:, 1, 1] = 1 + G - misc.safe_div(a**2, C) * \
# (7 * 25**7 * C**(5/2.) /
# (4 * (C**7 + 25**7)**(3/2.))) # dap/da
jac[:, 1, 1] = 1 + G - misc.safe_div(a**2, C) * \
(7 * 25**7 * C**(5/2.) /
(8 * (C**7 + 25**7)**(3/2.))) # dap/da
jac[C == 0, 1, 1] = 1
# jac[:, 1, 2] = - a * misc.safe_div(b, C) * \
# (7 * 25**7 * C**(5/2.) / (4 * (C**7 + 25**7)**(3/2.)))
jac[:, 1, 2] = - a * misc.safe_div(b, C) * \
(7 * 25**7 * C**(5/2.) / (8 * (C**7 + 25**7)**(3/2.)))
jac[C == 0, 1, 2] = 0
return jac
class TransformSRGB(Transform):
"""
Transform linear RGB with sRGB primaries to sRGB.
"""
def __init__(self, base):
"""
Construct sRGB space instance, setting the base (linear RGB).
Parameters
----------
base : Space
The base colour space.
"""
super(TransformSRGB, self).__init__(base)
def to_base(self, ndata):
"""
Convert from sRGB to linear RGB. Performs gamut clipping if necessary.
Parameters
----------
ndata : ndarray
Colour data in the sRGB colour space
Returns
-------
col : ndarray
Colour data in the linear RGB colour space
"""
nd = ndata.copy()
nd[nd < 0] = 0
nd[nd > 1] = 1
rgb = ((nd + 0.055) / 1.055)**2.4
rgb[nd <= 0.04045] = nd[nd <= 0.04045] / 12.92
return rgb
def jacobian_base(self, data):
"""
Return the Jacobian to linear RGB (base), dsRGB^i/dRGB^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
rgb = data.get_flattened(self.base)
r = rgb[:, 0]
g = rgb[:, 1]
b = rgb[:, 2]
jac = self.empty_matrix(rgb)
jac[:, 0, 0] = 1.055 / 2.4 * r**(1 / 2.4 - 1)
jac[r < 0.0031308, 0, 0] = 12.92
jac[:, 1, 1] = 1.055 / 2.4 * g**(1 / 2.4 - 1)
jac[g < 0.0031308, 1, 1] = 12.92
jac[:, 2, 2] = 1.055 / 2.4 * b**(1 / 2.4 - 1)
jac[b < 0.0031308, 2, 2] = 12.92
return jac
def from_base(self, ndata):
"""
Convert from linear RGB to sRGB. Performs gamut clipping if necessary.
Parameters
----------
ndata : ndarray
Colour data in the linear colour space
Returns
-------
col : ndarray
Colour data in the sRGB colour space
"""
nd = ndata.copy()
nd[nd < 0] = 0
nd[nd > 1] = 1
srgb = 1.055 * nd**(1 / 2.4) - 0.055
srgb[nd <= 0.0031308] = 12.92 * nd[nd <= 0.0031308]
return srgb
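# Example (illustrative sketch): the sRGB companding in from_base/to_base acts
# only on the ndarray values, so it can be exercised in isolation; XYZ() is
# used here merely as a stand-in base and the values are arbitrary. The real
# sRGB chain (gamma curve on top of a linear-RGB matrix) is built near the end
# of this module as the `srgb` instance.
def _example_srgb_companding():
    srgb_space = TransformSRGB(XYZ())
    linear_vals = np.array([[0.002, 0.25, 1.0]])
    encoded = srgb_space.from_base(linear_vals)   # linear RGB -> sRGB
    decoded = srgb_space.to_base(encoded)         # sRGB -> linear RGB
    return encoded, np.allclose(linear_vals, decoded)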
class TransformLinear(Transform):
"""
General linear transform, transformed = M * base
"""
def __init__(self, base, M=np.eye(3)):
"""
        Construct instance, setting the matrix of the linear transform.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformLinear, self).__init__(base)
self.M = M.copy()
self.M_inv = np.linalg.inv(M)
def to_base(self, ndata):
"""
Convert from linear to the base.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
xyz = np.zeros(np.shape(ndata))
for i in range(np.shape(ndata)[0]):
xyz[i] = np.dot(self.M_inv, ndata[i])
return xyz
def from_base(self, ndata):
"""
Convert from the base to linear.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
xyz = ndata
lins = np.zeros(np.shape(xyz))
for i in range(np.shape(xyz)[0]):
lins[i] = np.dot(self.M, xyz[i])
return lins
def jacobian_base(self, data):
"""
Return the Jacobian to XYZ (base), dlinear^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
xyzdata = data.get_flattened(xyz)
jac = self.empty_matrix(xyzdata)
jac[:] = self.M
return jac
def inv_jacobian_base(self, data):
"""
Return the Jacobian from XYZ (base), dXYZ^i/dlinear^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
xyzdata = data.get_flattened(xyz)
jac = self.empty_matrix(xyzdata)
jac[:] = self.M_inv
return jac
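# Example (illustrative sketch): a TransformLinear applies M in from_base and
# the inverse matrix in to_base. The matrix below is the Bradford adaptation
# matrix quoted from memory and used here purely as a sample invertible
# matrix; the input is approximately the D65 white point.
def _example_linear_roundtrip():
    M = np.array([[0.8951, 0.2664, -0.1614],
                  [-0.7502, 1.7135, 0.0367],
                  [0.0389, -0.0685, 1.0296]])
    lin = TransformLinear(XYZ(), M)
    XYZ_vals = np.array([[0.9505, 1.0, 1.089]])
    cone_like = lin.from_base(XYZ_vals)
    back = lin.to_base(cone_like)
    return cone_like, np.allclose(XYZ_vals, back)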
class TransformGamma(Transform):
"""
General gamma transform, transformed = base**gamma
Uses absolute value and sign for negative base values:
transformed = sign(base) * abs(base)**gamma
"""
def __init__(self, base, gamma=1):
"""
        Construct instance, setting the gamma of the transform.
Parameters
----------
base : Space
The base colour space.
gamma : float
The exponent for the gamma transformation from the base.
"""
super(TransformGamma, self).__init__(base)
self.gamma = float(gamma)
self.gamma_inv = 1. / gamma
def to_base(self, ndata):
"""
Convert from gamma corrected to XYZ (base).
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
return np.sign(ndata) * np.abs(ndata)**self.gamma_inv
def from_base(self, ndata):
"""
Convert from XYZ to gamma corrected.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
return np.sign(ndata) * np.abs(ndata)**self.gamma
def jacobian_base(self, data):
"""
Return the Jacobian to XYZ (base), dgamma^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
basedata = data.get_flattened(self.base)
jac = self.empty_matrix(basedata)
for i in range(np.shape(basedata)[0]):
jac[i, 0, 0] = self.gamma * \
np.abs(basedata[i, 0])**(self.gamma - 1)
jac[i, 1, 1] = self.gamma * \
np.abs(basedata[i, 1])**(self.gamma - 1)
jac[i, 2, 2] = self.gamma * \
np.abs(basedata[i, 2])**(self.gamma - 1)
return jac
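# Example (illustrative sketch): the gamma transform is sign-preserving, so a
# negative component survives an encode/decode round trip; the values and the
# exponent 0.43 are arbitrary here.
def _example_gamma_roundtrip():
    gamma_space = TransformGamma(XYZ(), gamma=0.43)
    vals = np.array([[0.2, -0.5, 1.0]])
    encoded = gamma_space.from_base(vals)    # sign(x) * |x|**0.43
    decoded = gamma_space.to_base(encoded)   # applies the inverse exponent
    return np.allclose(vals, decoded)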
class TransformPolar(Transform):
"""
    Transform from Cartesian to polar coordinates in the two last variables.
For example CIELAB to CIELCH.
"""
def __init__(self, base):
"""
Construct instance, setting base space.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformPolar, self).__init__(base)
def to_base(self, ndata):
"""
Convert from polar to Cartesian.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
Lab = np.zeros(np.shape(ndata))
Lab[:, 0] = ndata[:, 0]
C = ndata[:, 1]
h = ndata[:, 2]
Lab[:, 1] = C * np.cos(h)
Lab[:, 2] = C * np.sin(h)
return Lab
def from_base(self, ndata):
"""
Convert from Cartesian (base) to polar.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
LCh = np.zeros(np.shape(ndata))
LCh[:, 0] = ndata[:, 0]
x = ndata[:, 1]
y = ndata[:, 2]
LCh[:, 1] = np.sqrt(x**2 + y**2)
LCh[:, 2] = np.arctan2(y, x)
return LCh
def inv_jacobian_base(self, data):
"""
Return the Jacobian from CIELAB (base), dCIELAB^i/dCIELCH^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
LCh = data.get_flattened(self)
C = LCh[:, 1]
h = LCh[:, 2]
jac = self.empty_matrix(LCh)
for i in range(np.shape(jac)[0]):
jac[i, 0, 0] = 1 # dL/dL
jac[i, 1, 1] = np.cos(h[i]) # da/dC
jac[i, 1, 2] = -C[i] * np.sin(h[i]) # da/dh
jac[i, 2, 1] = np.sin(h[i]) # db/dC
jac[i, 2, 2] = C[i] * np.cos(h[i]) # db/dh
if C[i] == 0:
jac[i, 2, 2] = 1
jac[i, 1, 1] = 1
return jac
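# Example (illustrative sketch): a CIELCH-like space is simply TransformPolar
# on top of CIELAB; hue angles come out in radians (np.arctan2). The sample
# Lab values are arbitrary.
def _example_polar_roundtrip():
    lch_space = TransformPolar(TransformCIELAB(XYZ()))
    lab_vals = np.array([[50.0, 20.0, -30.0],
                         [75.0, 0.0, 40.0]])
    lch_vals = lch_space.from_base(lab_vals)   # (L, C, h)
    back = lch_space.to_base(lch_vals)         # back to (L, a, b)
    return lch_vals, np.allclose(lab_vals, back)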
class TransformCartesian(Transform):
"""
    Transform from polar to Cartesian coordinates in the two last variables.
For example CIELCH to CIELAB.
"""
def __init__(self, base):
"""
Construct instance, setting base space.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformCartesian, self).__init__(base)
def from_base(self, ndata):
"""
Convert from polar to Cartesian.
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
Lab = np.zeros(np.shape(ndata))
Lab[:, 0] = ndata[:, 0]
C = ndata[:, 1]
h = ndata[:, 2]
Lab[:, 1] = C * np.cos(h)
Lab[:, 2] = C * np.sin(h)
return Lab
def to_base(self, ndata):
"""
Convert from Cartesian (base) to polar.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
LCh = np.zeros(np.shape(ndata))
LCh[:, 0] = ndata[:, 0]
x = ndata[:, 1]
y = ndata[:, 2]
LCh[:, 1] = np.sqrt(x**2 + y**2)
LCh[:, 2] = np.arctan2(y, x)
return LCh
def jacobian_base(self, data):
"""
Return the Jacobian from CIELCh (base), dCIELAB^i/dCIELCH^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
LCh = data.get_flattened(self.base)
C = LCh[:, 1]
h = LCh[:, 2]
jac = self.empty_matrix(LCh)
for i in range(np.shape(jac)[0]):
jac[i, 0, 0] = 1 # dL/dL
jac[i, 1, 1] = np.cos(h[i]) # da/dC
jac[i, 1, 2] = -C[i] * np.sin(h[i]) # da/dh
jac[i, 2, 1] = np.sin(h[i]) # db/dC
jac[i, 2, 2] = C[i] * np.cos(h[i]) # db/dh
return jac
class TransformLGJOSA(Transform):
"""
Transform from XYZ type coordinates to L_osa G J.
"""
def __init__(self, base):
"""
Construct instance, setting base space.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformLGJOSA, self).__init__(base)
self.space_ABC = TransformLinear(self.base,
np.array([[0.6597, 0.4492, -0.1089],
[-0.3053, 1.2126, 0.0927],
[-0.0374, 0.4795, 0.5579]]))
self.space_xyY = TransformxyY(self.base)
def err_func(self, xyz, lgj):
clgj = self.from_base(np.reshape(xyz, (1, 3)))
diff = clgj - np.reshape(lgj, (1, 3))
n = np.linalg.norm(diff)
return n
def to_base(self, ndata):
"""
Convert from LGJOSA to XYZ (base).
Implemented as numerical inversion of the from_base method,
since the functions unfortunately are not analytically
invertible.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
import scipy.optimize
xyz = .5 * np.ones(np.shape(ndata))
for i in range(np.shape(xyz)[0]):
xyz_guess = xyz[i].copy()
lgj = ndata[i].copy()
xyz[i] = scipy.optimize.fmin(self.err_func, xyz_guess, (lgj,))
return xyz
def from_base(self, ndata):
"""
Transform from base to LGJ OSA.
Parameters
----------
ndata : ndarray
Colour data in the base colour space (XYZ).
Returns
-------
col : ndarray
Colour data in the LGJOSA colour space.
"""
abc = self.space_ABC.from_base(ndata)
A = abc[:, 0]
B = abc[:, 1]
C = abc[:, 2]
xyY = self.space_xyY.from_base(ndata)
x = xyY[:, 0]
y = xyY[:, 1]
Y = xyY[:, 2]
Y_0 = 100 * Y * (4.4934 * x**2 + 4.3034 * y**2 - 4.2760 * x * y -
1.3744 * x - 2.5643 * y + 1.8103)
L_osa = (5.9 * ((Y_0**(1/3.) - (2/3.)) +
0.0042 * np.sign(Y_0 - 30) *
np.abs(Y_0 - 30)**(1/3.)) - 14.4) / np.sqrt(2)
G = -2 * (0.764 * L_osa + 9.2521) * (
0.9482 * (np.log(A) - np.log(0.9366 * B)) -
0.3175 * (np.log(B) - np.log(0.9807 * C)))
J = 2 * (0.5735 * L_osa + 7.0892) * (
0.1792 * (np.log(A) - np.log(0.9366 * B)) +
0.9237 * (np.log(B) - np.log(0.9807 * C)))
col = np.zeros(np.shape(ndata))
col[:, 0] = L_osa
col[:, 1] = G
col[:, 2] = J
return col
def jacobian_base(self, data):
"""
Return the Jacobian from XYZ (base), dLGJOSA^i/dXYZ^j.
The Jacobian is calculated at the given data points (of the
Points class). Like the colour space, a terrible mess...
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
ABC = data.get_flattened(self.space_ABC)
xyY = data.get_flattened(self.space_xyY)
x = xyY[:, 0]
y = xyY[:, 1]
Y = xyY[:, 2]
A = ABC[:, 0]
B = ABC[:, 1]
C = ABC[:, 2]
dxyY_dXYZ = self.space_xyY.jacobian_base(data)
dx_dX = dxyY_dXYZ[:, 0, 0]
dx_dY = dxyY_dXYZ[:, 0, 1]
dx_dZ = dxyY_dXYZ[:, 0, 2]
dy_dX = dxyY_dXYZ[:, 1, 0]
dy_dY = dxyY_dXYZ[:, 1, 1]
dy_dZ = dxyY_dXYZ[:, 1, 2]
dY_dX = dxyY_dXYZ[:, 2, 0]
dY_dY = dxyY_dXYZ[:, 2, 1]
dY_dZ = dxyY_dXYZ[:, 2, 2]
dABC_dXYZ = self.space_ABC.jacobian_base(data)
dA_dX = dABC_dXYZ[:, 0, 0]
dA_dY = dABC_dXYZ[:, 0, 1]
dA_dZ = dABC_dXYZ[:, 0, 2]
dB_dX = dABC_dXYZ[:, 1, 0]
dB_dY = dABC_dXYZ[:, 1, 1]
dB_dZ = dABC_dXYZ[:, 1, 2]
dC_dX = dABC_dXYZ[:, 2, 0]
dC_dY = dABC_dXYZ[:, 2, 1]
dC_dZ = dABC_dXYZ[:, 2, 2]
Y_0 = 100 * Y * (4.4934 * x**2 + 4.3034 * y**2 - 4.2760 * x * y -
1.3744 * x - 2.5643 * y + 1.8103)
L = (5.9 * ((Y_0**(1/3.) - (2/3.)) +
0.0042 * np.sign(Y_0 - 30) *
np.abs(Y_0 - 30)**(1/3.)) - 14.4) / np.sqrt(2)
dL_dY0 = 5.9 * (Y_0**(-2./3) + 0.042 * np.sign(Y_0 - 30) *
np.abs(Y_0 - 30)**(-2./3) / 3) / np.sqrt(2)
dY0_dx = 100 * Y * (4.4934 * 2 * x - 4.2760 * y - 1.3744)
dY0_dy = 100 * Y * (4.3034 * 2 * y - 4.2760 * x - 2.5643)
dY0_dY = 100 * (4.4934 * x**2 + 4.3034 * y**2 - 4.2760 * x * y -
1.3744 * x - 2.5643 * y + 1.8103)
dL_dX = dL_dY0 * (dY0_dx * dx_dX + dY0_dy * dy_dX + dY0_dY * dY_dX)
dL_dY = dL_dY0 * (dY0_dx * dx_dY + dY0_dy * dy_dY + dY0_dY * dY_dY)
dL_dZ = dL_dY0 * (dY0_dx * dx_dZ + dY0_dy * dy_dZ + dY0_dY * dY_dZ)
TG = 0.9482 * (np.log(A) - np.log(0.9366 * B)) - \
0.3175 * (np.log(B) - np.log(0.9807 * C))
TJ = 0.1792 * (np.log(A) - np.log(0.9366 * B)) + \
0.9237 * (np.log(B) - np.log(0.9807 * C))
SG = - 2 * (0.764 * L + 9.2521)
SJ = 2 * (0.5735 * L + 7.0892)
dG_dL = - 2 * 0.764 * TG
dJ_dL = 2 * 0.57354 * TJ
dG_dA = misc.safe_div(SG * 0.9482, A)
dG_dB = misc.safe_div(SG * (-0.9482 - 0.3175), B)
dG_dC = misc.safe_div(SG * 0.3175, C)
dJ_dA = misc.safe_div(SJ * 0.1792, A)
dJ_dB = misc.safe_div(SJ * (-0.1792 + 0.9837), B)
dJ_dC = misc.safe_div(SJ * (-0.9837), C)
dG_dX = dG_dL * dL_dX + dG_dA * dA_dX + dG_dB * dB_dX + dG_dC * dC_dX
dG_dY = dG_dL * dL_dY + dG_dA * dA_dY + dG_dB * dB_dY + dG_dC * dC_dY
dG_dZ = dG_dL * dL_dZ + dG_dA * dA_dZ + dG_dB * dB_dZ + dG_dC * dC_dZ
dJ_dX = dJ_dL * dL_dX + dJ_dA * dA_dX + dJ_dB * dB_dX + dJ_dC * dC_dX
dJ_dY = dJ_dL * dL_dY + dJ_dA * dA_dY + dJ_dB * dB_dY + dJ_dC * dC_dY
dJ_dZ = dJ_dL * dL_dZ + dJ_dA * dA_dZ + dJ_dB * dB_dZ + dJ_dC * dC_dZ
jac = self.empty_matrix(ABC)
jac[:, 0, 0] = dL_dX
jac[:, 0, 1] = dL_dY
jac[:, 0, 2] = dL_dZ
jac[:, 1, 0] = dG_dX
jac[:, 1, 1] = dG_dY
jac[:, 1, 2] = dG_dZ
jac[:, 2, 0] = dJ_dX
jac[:, 2, 1] = dJ_dY
jac[:, 2, 2] = dJ_dZ
return jac
class TransformLGJE(Transform):
"""
Transform from LGJOSA type coordinates to L_E, G_E, J_E.
"""
def __init__(self, base):
"""
Construct instance, setting base space.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformLGJE, self).__init__(base)
self.aL = 2.890
self.bL = 0.015
self.ac = 1.256
self.bc = 0.050
def to_base(self, ndata):
"""
Convert from LGJE to LGJOSA (base).
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
LE = ndata[:, 0]
GE = ndata[:, 1]
JE = ndata[:, 2]
CE = np.sqrt(GE**2 + JE**2)
L = self.aL * (np.exp(self.bL * LE) - 1) / (10 * self.bL)
C = self.ac * (np.exp(self.bc * CE) - 1) / (10 * self.bc)
scale = misc.safe_div(C, CE)
G = - scale * GE
J = - scale * JE
col = ndata.copy()
col[:, 0] = L
col[:, 1] = G
col[:, 2] = J
return col
def from_base(self, ndata):
"""
Transform from LGJOSA (base) to LGJE.
Parameters
----------
ndata : ndarray
Colour data in the base colour space (LGJOSA).
Returns
-------
col : ndarray
            Colour data in the LGJE colour space.
"""
L = ndata[:, 0]
G = ndata[:, 1]
J = ndata[:, 2]
C = np.sqrt(G**2 + J**2)
L_E = np.log(1 + 10 * L * self.bL / self.aL) / self.bL
C_E = np.log(1 + 10 * C * self.bc / self.ac) / self.bc
scale = misc.safe_div(C_E, C)
G_E = - scale * G
J_E = - scale * J
col = ndata.copy()
col[:, 0] = L_E
col[:, 1] = G_E
col[:, 2] = J_E
return col
def jacobian_base(self, data):
"""
Return the Jacobian from LGJOSA (base), dLGJE^i/dLGJOSA^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
lgj = data.get_flattened(self.base)
L = lgj[:, 0]
G = lgj[:, 1]
J = lgj[:, 2]
C = np.sqrt(G**2 + J**2)
lgj_e = data.get_flattened(self)
C_E = np.sqrt(lgj_e[:, 1]**2 + lgj_e[:, 2]**2)
dLE_dL = 10 / (self.aL + 10 * self.bL * L)
dCE_dC = 10 / (self.ac + 10 * self.bc * C)
dCEC_dC = misc.safe_div(dCE_dC * C - C_E, C**2)
dC_dG = misc.safe_div(G, C)
dC_dJ = misc.safe_div(J, C)
dCEC_dG = dCEC_dC * dC_dG
dCEC_dJ = dCEC_dC * dC_dJ
dGE_dG = - misc.safe_div(C_E, C) - G * dCEC_dG
dGE_dJ = - G * dCEC_dJ
dJE_dG = - J * dCEC_dG
dJE_dJ = - misc.safe_div(C_E, C) - J * dCEC_dJ
jac = self.empty_matrix(lgj)
jac[:, 0, 0] = dLE_dL
jac[:, 1, 1] = dGE_dG
jac[:, 1, 2] = dGE_dJ
jac[:, 2, 1] = dJE_dG
jac[:, 2, 2] = dJE_dJ
return jac
class TransformLogCompressL(Transform):
"""
Perform parametric logarithmic compression of lightness.
As in the DIN99x formulae.
"""
def __init__(self, base, aL, bL):
"""
Construct instance, setting base space.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformLogCompressL, self).__init__(base)
self.aL = aL
self.bL = bL
def from_base(self, ndata):
"""
Transform from Lab (base) to L'ab.
Parameters
----------
ndata : ndarray
Colour data in the base colour space (Lab).
Returns
-------
col : ndarray
            Colour data in the L'ab colour space.
"""
Lpab = ndata.copy()
Lpab[:, 0] = self.aL * np.log(1 + self.bL * ndata[:, 0])
return Lpab
def to_base(self, ndata):
"""
Transform from L'ab to Lab (base).
Parameters
----------
ndata : ndarray
Colour data in L'ab colour space.
Returns
-------
col : ndarray
Colour data in the Lab colour space.
"""
Lab = ndata.copy()
Lab[:, 0] = (np.exp(ndata[:, 0] / self.aL) - 1) / self.bL
return Lab
def jacobian_base(self, data):
"""
Return the Jacobian from Lab (base), dL'ab^i/dLab^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
lab = data.get_flattened(self.base)
L = lab[:, 0]
dLp_dL = self.aL * self.bL / (1 + self.bL * L)
jac = self.empty_matrix(lab)
jac[:, 0, 0] = dLp_dL
jac[:, 1, 1] = 1
jac[:, 2, 2] = 1
return jac
class TransformLogCompressC(Transform):
"""
Perform parametric logarithmic compression of chroma.
As in the DIN99x formulae.
"""
def __init__(self, base, aC, bC):
"""
Construct instance, setting base space.
Parameters
----------
base : Space
The base colour space.
"""
super(TransformLogCompressC, self).__init__(base)
self.aC = aC
self.bC = bC
def from_base(self, ndata):
"""
Transform from Lab (base) to La'b'.
Parameters
----------
ndata : ndarray
Colour data in the base colour space (Lab).
Returns
-------
col : ndarray
Colour data in the La'b' colour space.
"""
Lapbp = ndata.copy()
C = np.sqrt(ndata[:, 1]**2 + ndata[:, 2]**2)
Cp = self.aC * np.log(1 + self.bC * C)
scale = misc.safe_div(Cp, C)
ap = scale * ndata[:, 1]
bp = scale * ndata[:, 2]
Lapbp[:, 1] = ap
Lapbp[:, 2] = bp
return Lapbp
def to_base(self, ndata):
"""
Transform from La'b' to Lab (base).
Parameters
----------
ndata : ndarray
            Colour data in the La'b' colour space.
Returns
-------
col : ndarray
Colour data in the Lab colour space.
"""
Lab = ndata.copy()
ap = ndata[:, 1]
bp = ndata[:, 2]
Cp = np.sqrt(ap**2 + bp**2)
C = (np.exp(Cp / self.aC) - 1) / self.bC
scale = misc.safe_div(Cp, C)
a = scale * ap
b = scale * bp
Lab[:, 1] = a
Lab[:, 2] = b
return Lab
def jacobian_base(self, data):
"""
Return the Jacobian from Lab (base), dLa'b'^i/dLab^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
lab = data.get_flattened(self.base)
lapbp = data.get_flattened(self)
a = lab[:, 1]
b = lab[:, 2]
C = np.sqrt(a**2 + b**2)
Cp = np.sqrt(lapbp[:, 1]**2 + lapbp[:, 2]**2)
dC_da = misc.safe_div(a, C)
dC_db = misc.safe_div(b, C)
dCp_dC = self.aC * self.bC / (1 + self.bC * C)
dCpC_dC = misc.safe_div(dCp_dC * C - Cp, C**2)
dap_da = misc.safe_div(Cp, C) + a * (dCpC_dC * dC_da)
dbp_db = misc.safe_div(Cp, C) + b * (dCpC_dC * dC_db)
dap_db = a * dCpC_dC * dC_db
dbp_da = b * dCpC_dC * dC_da
jac = self.empty_matrix(lab)
jac[:, 0, 0] = 1
jac[:, 1, 1] = dap_da
jac[:, 1, 2] = dap_db
jac[:, 2, 1] = dbp_da
jac[:, 2, 2] = dbp_db
return jac
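# Example (illustrative sketch): chaining lightness and chroma compression on
# top of CIELAB, with the constants copied from the module-level DIN99
# definition near the end of this file. The full DIN99 instance also inserts a
# 16 degree rotation (a TransformLinear) between the two compressions, which
# is left out here for brevity.
def _example_din99_style_compression():
    lp = TransformLogCompressL(TransformCIELAB(XYZ()), 105.51, 0.0158)
    lpcp = TransformLogCompressC(lp, 1 / 0.045, 0.045)
    lab_vals = np.array([[50.0, 10.0, -10.0]])
    return lpcp.from_base(lp.from_base(lab_vals))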
class TransformPoincareDisk(Transform):
"""
Transform from Cartesian coordinates to Poincare disk coordinates.
The coordinate transform only changes the radius (chroma, typically),
and does so in a way that preserves the radial distance with respect to
the Euclidean metric and the Poincare disk metric in the source and
target spaces, respectively.
"""
def __init__(self, base, R=1.):
"""
Construct instance, setting base space and radius of curvature.
Parameters
----------
base : Space
The base colour space.
R : float
The radius of curvature.
"""
super(TransformPoincareDisk, self).__init__(base)
self.R = R
def to_base(self, ndata):
"""
Transform from Poincare disk to base.
Parameters
----------
ndata : ndarray
Colour data in the current colour space
Returns
-------
col : ndarray
Colour data in the base colour space
"""
Lab = ndata.copy()
Lab[:, 1:] = 0
x = ndata[:, 1]
y = ndata[:, 2]
r = np.sqrt(x**2 + y**2)
for i in range(np.shape(Lab)[0]):
if r[i] > 0:
Lab[i, 1:] = ndata[i, 1:] * 2 * \
self.R * np.arctanh(r[i]) / r[i]
return Lab
def from_base(self, ndata):
"""
Transform from base to Poincare disk
Parameters
----------
ndata : ndarray
Colour data in the base colour space.
Returns
-------
col : ndarray
Colour data in the current colour space.
"""
Lxy = ndata.copy()
Lxy[:, 1:] = 0
a = ndata[:, 1]
b = ndata[:, 2]
C = np.sqrt(a**2 + b**2)
for i in range(np.shape(Lxy)[0]):
if C[i] > 0:
Lxy[i, 1:] = ndata[i, 1:] * np.tanh(C[i] / (2 * self.R)) / C[i]
return Lxy
def jacobian_base(self, data):
"""
Return the Jacobian from CIELAB (base), dLxy^i/dCIELAB^j.
The Jacobian is calculated at the given data points (of the
Points class).
Parameters
----------
data : Points
Colour data points for the jacobians to be computed.
Returns
-------
jacobian : ndarray
The list of Jacobians to the base colour space.
"""
# TODO: bugfix!!!
Lab = data.get_flattened(self.base)
a = Lab[:, 1]
b = Lab[:, 2]
C = np.sqrt(a**2 + b**2)
tanhC2R = np.tanh(C / (2. * self.R))
tanhC2C = misc.safe_div(tanhC2R, C)
dCda = misc.safe_div(a, C)
dCdb = misc.safe_div(b, C)
dtanhdC = misc.safe_div(C / (2. * self.R) *
(1 - tanhC2R**2) - tanhC2R, C**2)
jac = self.empty_matrix(Lab)
for i in range(np.shape(jac)[0]):
jac[i, 0, 0] = 1 # dL/dL
if C[i] == 0:
jac[i, 1, 1] = .5 # dx/da
jac[i, 2, 2] = .5 # dy/db
else:
jac[i, 1, 1] = tanhC2C[i] + \
a[i] * dtanhdC[i] * dCda[i] # dx/da
jac[i, 1, 2] = a[i] * dtanhdC[i] * dCdb[i] # dx/db
jac[i, 2, 1] = b[i] * dtanhdC[i] * dCda[i] # dy/da
jac[i, 2, 2] = tanhC2C[i] + \
b[i] * dtanhdC[i] * dCdb[i] # dy/db
return jac
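# Example (illustrative sketch): the Poincare disk mapping compresses the
# chroma radius with tanh(C / (2R)), so the transformed radius always stays
# below one. R and the sample Lab values below are arbitrary.
def _example_poincare_disk_roundtrip():
    disk = TransformPoincareDisk(TransformCIELAB(XYZ()), R=25.0)
    lab_vals = np.array([[50.0, 30.0, 0.0],
                         [50.0, 10.0, -20.0]])
    disk_vals = disk.from_base(lab_vals)
    back = disk.to_base(disk_vals)
    return disk_vals, np.allclose(lab_vals, back)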
# =============================================================================
# Colour space instances
# =============================================================================
# CIE based
xyz = XYZ()
xyY = TransformxyY(xyz)
cielab = TransformCIELAB(xyz)
cielch = TransformPolar(cielab)
cieluv = TransformCIELUV(xyz)
ciede00lab = TransformCIEDE00(cielab)
ciede00lch = TransformPolar(ciede00lab)
ciecat02 = TransformLinear(xyz,
np.array([[.7328, .4296, -.1624],
                                     [-.7036, 1.6975, .0061],
[.0030, .0136, .9834]]))
ciecat16 = TransformLinear(xyz,
np.array([[.401288, .650173, -.051461],
[-.250268, 1.204414, .045854],
[-.002079, .048952, .953127]]))
# sRGB
_srgb_linear = TransformLinear(
xyz,
np.array([[3.2404542, -1.5371385, -0.4985314],
[-0.9692660, 1.8760108, 0.0415560],
[0.0556434, -0.2040259, 1.0572252]]))
srgb = TransformSRGB(_srgb_linear)
# Adobe RGB
_rgb_adobe_linear = TransformLinear(
xyz,
np.array([[2.0413690, -0.5649464, -0.3446944],
[-0.9692660, 1.8760108, 0.0415560],
[0.0134474, -0.1183897, 1.0154096]]))
rgb_adobe = TransformGamma(_rgb_adobe_linear, 1 / 2.2)
# IPT
_ipt_lms = TransformLinear(
xyz,
np.array([[.4002, .7075, -.0807],
[-.228, 1.15, .0612],
[0, 0, .9184]]))
_ipt_lmsp = TransformGamma(_ipt_lms, .43)
ipt = TransformLinear(
_ipt_lmsp,
np.array([[.4, .4, .2],
[4.455, -4.850, .3960],
[.8056, .3572, -1.1628]]))
# OSA-UCS
lgj_osa = TransformLGJOSA(xyz)
lgj_e = TransformLGJE(lgj_osa)
# DIN99
_din99_lpab = TransformLogCompressL(cielab, 105.51, 0.0158)
_din99_lef = TransformLinear(
_din99_lpab,
np.array([[1, 0, 0],
[0, np.cos(np.deg2rad(16.)),
np.sin(np.deg2rad(16.))],
[0, - 0.7 * np.sin(np.deg2rad(16.)),
0.7 * np.cos(np.deg2rad(16.))]]))
din99 = TransformLogCompressC(_din99_lef, 1 / 0.045, 0.045)
# DIN99b
_din99b_lpab = TransformLogCompressL(cielab, 303.67, 0.0039)
_din99b_lef = TransformLinear(
_din99b_lpab,
np.array([[1, 0, 0],
[0, np.cos(np.deg2rad(26.)), np.sin(np.deg2rad(26.))],
[0, - 0.83 * np.sin(np.deg2rad(26.)),
0.83 * np.cos(np.deg2rad(26.))]]))
_din99b_rot = TransformLogCompressC(_din99b_lef, 23.0, 0.075)
din99b = TransformLinear(
_din99b_rot,
np.array([[1, 0, 0],
[0, np.cos(np.deg2rad(-26.)), np.sin(np.deg2rad(-26.))],
[0, - np.sin(np.deg2rad(-26.)), np.cos(np.deg2rad(-26.))]]))
# DIN99c
_din99c_xyz = TransformLinear(xyz,
np.array([[1.1, 0, -0.1],
[0, 1, 0],
[0, 0, 1]]))
_din99c_white = np.dot(_din99c_xyz.M, _din99c_xyz.white_D65)
_din99c_lab = TransformCIELAB(_din99c_xyz, _din99c_white)
_din99c_lpab = TransformLogCompressL(_din99c_lab, 317.65, 0.0037)
_din99c_lef = TransformLinear(_din99c_lpab,
np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, .94]]))
din99c = TransformLogCompressC(_din99c_lef, 23., 0.066)
# DIN99d
_din99d_xyz = TransformLinear(xyz,
np.array([[1.12, 0, -0.12],
[0, 1, 0],
[0, 0, 1]]))
_din99d_white = np.dot(_din99d_xyz.M, _din99d_xyz.white_D65)
_din99d_lab = TransformCIELAB(_din99d_xyz, _din99d_white)
_din99d_lpab = TransformLogCompressL(_din99d_lab, 325.22, 0.0036)
_din99d_lef = TransformLinear(
_din99d_lpab,
np.array([[1, 0, 0],
[0, np.cos(np.deg2rad(50.)), np.sin(np.deg2rad(50.))],
[0, - 1.14 * np.sin(np.deg2rad(50.)),
1.14 * np.cos(np.deg2rad(50.))]]))
_din99d_rot = TransformLogCompressC(_din99d_lef, 23., 0.066)
din99d = TransformLinear(
_din99d_rot,
np.array([[1, 0, 0],
[0, np.cos(np.deg2rad(-50.)), np.sin(np.deg2rad(-50.))],
[0, - np.sin(np.deg2rad(-50.)), np.cos(np.deg2rad(-50.))]]))
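# Example (illustrative sketch): the instances above can be chained through
# XYZ directly on N x 3 arrays, e.g. taking an arbitrary sRGB triplet to
# CIELAB.
def _example_srgb_to_cielab():
    srgb_vals = np.array([[0.8, 0.3, 0.2]])
    return cielab.from_XYZ(srgb.to_XYZ(srgb_vals))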
| gpl-3.0 | 1,189,778,251,837,604,400 | 28.771328 | 79 | 0.468863 | false |
openstack/octavia | octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py | 1 | 20294 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.common import data_models
from octavia.controller.worker.v1.flows import amphora_flows
import octavia.tests.unit.base as base
AUTH_VERSION = '2'
# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
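# Because mock.patch is used as a class decorator below, it is applied to
# every method whose name starts with 'test', and the created MagicMock is
# passed to each of them as the final positional argument (named
# mock_get_net_driver in the signatures). A minimal sketch of the pattern:
#
#     @mock.patch('octavia.common.utils.get_network_driver')
#     class ExampleTest(base.TestCase):
#         def test_something(self, mock_get_net_driver):
#             mock_get_net_driver.return_value = mock.MagicMock()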
@mock.patch('octavia.common.utils.get_network_driver')
class TestAmphoraFlows(base.TestCase):
def setUp(self):
super().setUp()
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
self.conf.config(
group="controller_worker",
amphora_driver='amphora_haproxy_rest_driver')
self.conf.config(group="nova", enable_anti_affinity=False)
self.AmpFlow = amphora_flows.AmphoraFlows()
self.amp1 = data_models.Amphora(id=1)
self.amp2 = data_models.Amphora(id=2)
self.amp3 = data_models.Amphora(id=3, status=constants.DELETED)
self.amp4 = data_models.Amphora(id=uuidutils.generate_uuid())
self.lb = data_models.LoadBalancer(
id=4, amphorae=[self.amp1, self.amp2, self.amp3])
def test_get_create_amphora_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.get_create_amphora_flow()
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires))
def test_get_create_amphora_flow_cert(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow.get_create_amphora_flow()
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires))
def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_STANDALONE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_STANDALONE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_master_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_MASTER)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.conf.config(group="nova", enable_anti_affinity=True)
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_MASTER)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
self.conf.config(group="nova", enable_anti_affinity=False)
def test_get_cert_backup_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_BACKUP)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_bogus_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', 'BOGUS_ROLE')
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
self, mock_get_net_driver):
self.conf.config(group="nova", enable_anti_affinity=True)
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
'SOMEPREFIX', constants.ROLE_BACKUP)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(5, len(amp_flow.requires))
self.conf.config(group="nova", enable_anti_affinity=False)
def test_get_delete_amphora_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.get_delete_amphora_flow(self.amp4)
self.assertIsInstance(amp_flow, flow.Flow)
# This flow injects the required data at flow compile time.
self.assertEqual(0, len(amp_flow.provides))
self.assertEqual(0, len(amp_flow.requires))
def test_get_failover_flow_act_stdby(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(
id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER,
load_balancer_id=uuidutils.generate_uuid())
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 2)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(13, len(amp_flow.provides))
def test_get_failover_flow_standalone(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(
id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE,
load_balancer_id=uuidutils.generate_uuid(), vrrp_ip='2001:3b8::32')
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(12, len(amp_flow.provides))
def test_get_failover_flow_bogus_role(self, mock_get_net_driver):
failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(),
role='bogus')
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
print(amp_flow.requires)
self.assertEqual(1, len(amp_flow.requires))
self.assertEqual(1, len(amp_flow.provides))
def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow()
self.assertIsInstance(amp_rotate_flow, flow.Flow)
self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides)
self.assertIn(constants.AMPHORA, amp_rotate_flow.requires)
self.assertEqual(1, len(amp_rotate_flow.provides))
self.assertEqual(2, len(amp_rotate_flow.requires))
def test_get_vrrp_subflow(self, mock_get_net_driver):
vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123')
self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(2, len(vrrp_subflow.requires))
def test_get_vrrp_subflow_dont_create_vrrp_group(
self, mock_get_net_driver):
vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123',
create_vrrp_group=False)
self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(2, len(vrrp_subflow.requires))
def test_get_post_map_lb_subflow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows()
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_MASTER)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_BACKUP)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', constants.ROLE_STANDALONE)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow(
'SOMEPREFIX', 'BOGUS_ROLE')
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires))
def test_update_amphora_config_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.update_amphora_config_flow()
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertEqual(2, len(amp_flow.requires))
self.assertEqual(0, len(amp_flow.provides))
def test_get_amphora_for_lb_failover_flow_single(self,
mock_get_net_driver):
FAILED_PORT_ID = uuidutils.generate_uuid()
TEST_PREFIX = 'test_prefix'
get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
TEST_PREFIX, role=constants.ROLE_STANDALONE,
failed_amp_vrrp_port_id=FAILED_PORT_ID, is_vrrp_ipv6=True)
self.assertIsInstance(get_amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
self.assertIn(constants.FLAVOR, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
self.assertIn(constants.VIP, get_amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
self.assertIn(constants.ADDED_PORTS, get_amp_flow.provides)
self.assertIn(constants.AMPHORA, get_amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
self.assertIn(constants.DELTA, get_amp_flow.provides)
self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
def test_get_amphora_for_lb_failover_flow_act_stdby(self,
mock_get_net_driver):
TEST_PREFIX = 'test_prefix'
get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
TEST_PREFIX, role=constants.ROLE_MASTER)
self.assertIsInstance(get_amp_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
self.assertIn(constants.FLAVOR, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
self.assertIn(constants.VIP, get_amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
self.assertIn(constants.ADDED_PORTS, get_amp_flow.provides)
self.assertIn(constants.AMPHORA, get_amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
self.assertIn(constants.DELTA, get_amp_flow.provides)
self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
| apache-2.0 | -6,779,304,459,603,399,000 | 41.814346 | 79 | 0.685671 | false |
lizerd123/github | game compalation/jack.py | 1 | 1810 | # A map GUI thing
# +---------+
# |####|****|
# |####|****|
# |####|****|
# |----+----|
# |::::|$$$$|
# |::::|$$$$|
# |::::|$$$$|
# +---------+
print "+-----------+"
print "| U = Up |"
print "| D = Down |"
print "| R = Right |"
print "| L = Left |"
print "| Q = Quit |"
print "+-----------+"
mainmap = []
coords = [6,5]
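# mainmap holds the grid as a list of rows (each row a list of single
# characters); coords is the player position as a 1-based [x, y] pair.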
# these are for making map
def genMap(inarray):
for i in range(3):
makeLine(inarray, '#', '|', '*')
makeLine(inarray, '-', '+', '-')
for i in range(3):
makeLine(inarray, ':', '|', '$')
def makeLine(inarray, a, b, c):
inarray.append([])
iter = len(inarray) - 1
for i in range(4):
inarray[iter].append(a)
inarray[iter].append(b)
for i in range(4):
inarray[iter].append(c)
def resetMap(mainmap):
    # rebuild the shared map in place so the '@' marker left by showMap is cleared
    del mainmap[:]
    genMap(mainmap)
genMap(mainmap)
# draws the map and marks the player position with '@'
def showMap(mainmap):
print " 123456789 "
print "+---------+"
iter = 0
for z in range(len(mainmap)):
iter += 1
if ((z + 1) == coords[1]):
storage = mainmap[coords[1]-1]
storage[coords[0]-1] = '@'
print (str(iter) + '|' + (''.join(storage)) + '|')
else:
print (str(iter) + '|' + (''.join(mainmap[z])) + '|')
print "+---------+"
showMap(mainmap)
# move the player around the map
# dir = x/y (0 = x, 1 = y)
# move = direction of movement (move on x is right, move on y is down)
def move(coords, dir, move):
coords[dir] += move
if coords[dir] > 9:
coords[dir] = 1
if coords[dir] < 1:
coords[dir] = 9
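# e.g. move(coords, 0, 1) steps one tile to the right, wrapping from column 9
# back to column 1; move(coords, 1, -1) steps up with the same wrap-around.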
#UI
while True:
cmd = raw_input(": ").lower()
if cmd == 'u':
move(coords, 1, -1)
showMap(mainmap)
resetMap(mainmap)
if cmd == 'd':
move(coords, 1, 1)
showMap(mainmap)
resetMap(mainmap)
if cmd == 'r':
move(coords, 0, 1)
showMap(mainmap)
resetMap(mainmap)
if cmd == 'l':
move(coords, 0, -1)
showMap(mainmap)
resetMap(mainmap)
if cmd == 'q':
break | mit | -8,454,774,936,096,684,000 | 17.670103 | 70 | 0.523204 | false |
apporc/cinder | cinder/volume/drivers/emc/emc_cli_fc.py | 1 | 11978 | # Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fibre Channel Driver for EMC VNX array based on CLI."""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
class EMCCLIFCDriver(driver.FibreChannelDriver):
"""EMC FC Driver for VNX using CLI.
Version history:
1.0.0 - Initial driver
2.0.0 - Thick/thin provisioning, robust enhancement
3.0.0 - Array-based Backend Support, FC Basic Support,
Target Port Selection for MPIO,
Initiator Auto Registration,
Storage Group Auto Deletion,
Multiple Authentication Type Support,
Storage-Assisted Volume Migration,
SP Toggle for HA
3.0.1 - Security File Support
4.0.0 - Advance LUN Features (Compression Support,
Deduplication Support, FAST VP Support,
FAST Cache Support), Storage-assisted Retype,
External Volume Management, Read-only Volume,
FC Auto Zoning
4.1.0 - Consistency group support
5.0.0 - Performance enhancement, LUN Number Threshold Support,
Initiator Auto Deregistration,
Force Deleting LUN in Storage Groups,
robust enhancement
5.1.0 - iSCSI multipath enhancement
5.2.0 - Pool-aware scheduler support
5.3.0 - Consistency group modification support
6.0.0 - Over subscription support
Create consistency group from cgsnapshot support
Multiple pools support enhancement
Manage/unmanage volume revise
White list target ports support
Snap copy support
Support efficient non-disruptive backup
7.0.0 - Clone consistency group support
"""
def __init__(self, *args, **kwargs):
super(EMCCLIFCDriver, self).__init__(*args, **kwargs)
self.cli = emc_vnx_cli.getEMCVnxCli(
'FC',
configuration=self.configuration)
self.VERSION = self.cli.VERSION
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a volume."""
return self.cli.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.cli.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
return self.cli.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
self.cli.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
self.cli.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate volume via EMC migration functionality."""
return self.cli.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.cli.retype(ctxt, volume, new_type, diff, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.cli.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.cli.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
@zm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123',
'0987654321321']
}
}
}
"""
conn_info = self.cli.initialize_connection(volume,
connector)
LOG.debug("Exit initialize_connection"
" - Returning FC connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
@zm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)
LOG.debug("Exit terminate_connection"
" - Returning FC connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
def get_volume_stats(self, refresh=False):
"""Get volume stats.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
data = self.cli.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver'
data['storage_protocol'] = 'FC'
self._stats = data
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
        The LUN should be in a manageable pool backend, otherwise an
        error is returned.
        Rename the backend storage object so that it matches
        volume['name'], which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
manage_existing_ref:{
'source-id':<lun id in VNX>
}
or
manage_existing_ref:{
'source-name':<lun name in VNX>
}
"""
return self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing."""
return self.cli.manage_existing_get_size(volume, existing_ref)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
return self.cli.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return self.cli.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
return self.cli.create_cgsnapshot(
context, cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
return self.cli.delete_cgsnapshot(
context, cgsnapshot, snapshots)
def get_pool(self, volume):
"""Returns the pool name of a volume."""
return self.cli.get_pool(volume)
def update_consistencygroup(self, context, group,
add_volumes,
remove_volumes):
"""Updates LUNs in consistency group."""
return self.cli.update_consistencygroup(context, group,
add_volumes,
remove_volumes)
def unmanage(self, volume):
"""Unmanages a volume."""
return self.cli.unmanage(volume)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates a consistency group from source."""
return self.cli.create_consistencygroup_from_src(context,
group,
volumes,
cgsnapshot,
snapshots,
source_cg,
source_vols)
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status=None):
"""Returns model update for migrated volume."""
return self.cli.update_migrated_volume(context, volume, new_volume,
original_volume_status)
def create_export_snapshot(self, context, snapshot, connector):
"""Creates a snapshot mount point for snapshot."""
return self.cli.create_export_snapshot(context, snapshot, connector)
def remove_export_snapshot(self, context, snapshot):
"""Removes snapshot mount point for snapshot."""
return self.cli.remove_export_snapshot(context, snapshot)
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
"""Allows connection to snapshot."""
return self.cli.initialize_connection_snapshot(snapshot,
connector,
**kwargs)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
"""Disallows connection to snapshot."""
return self.cli.terminate_connection_snapshot(snapshot,
connector,
**kwargs)
def backup_use_temp_snapshot(self):
return True
| apache-2.0 | 2,333,369,983,232,384,500 | 38.531353 | 79 | 0.570463 | false |
lmaurits/BEASTling | beastling/configuration.py | 1 | 35756 | import itertools
import random
from pathlib import Path
from configparser import ConfigParser
import newick
from csvw.dsv import reader
from appdirs import user_data_dir
from beastling.fileio.datareaders import iterlocations
import beastling.clocks.random as random_clock
import beastling.models.geo as geo
from beastling import sections
from beastling.util import log
from beastling.util import monophyly
from beastling.util.misc import retrieve_url
import beastling.treepriors.base as treepriors
from beastling.treepriors.coalescent import CoalescentTree
from beastling.distributions import Calibration
_BEAST_MAX_LENGTH = 2147483647
def get_glottolog_data(datatype, release):
"""
Lookup or download data from Glottolog.
:param datatype: 'newick'|'geo'
:param release: Glottolog release number >= '2.4'
:return: the path of the data file
"""
path_spec = {
'newick': ('glottolog-{0}.newick', 'tree-glottolog-newick.txt'),
'geo': ('glottolog-{0}-geo.csv', 'languages-and-dialects-geo.csv'),
}
fname_pattern, fname_source = path_spec[datatype]
fname = fname_pattern.format(release)
path = Path(__file__).parent / 'data' / fname
if not path.exists():
data_dir = Path(user_data_dir('beastling'))
if not data_dir.exists():
data_dir.mkdir(parents=True)
path = data_dir / fname
if not path.exists():
try:
retrieve_url(
'https://glottolog.org/static/download/{0}/{1}'.format(release, fname_source),
path)
except (IOError, ValueError):
raise ValueError(
'Could not retrieve %s data for Glottolog %s' % (datatype, release))
return path
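# e.g. get_glottolog_data('newick', '4.0') resolves to glottolog-4.0.newick,
# looking first next to this module and then in the beastling user data dir,
# downloading the file from glottolog.org on first use ('4.0' is just an
# illustrative release number).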
class Configuration(object):
"""
A container object for all of the settings which define a BEASTling
analysis. Configuration objects are initialised with default values
for all options.
"""
def __init__(self, basename="beastling", configfile=None, stdin_data=False, prior=False, force_glottolog_load=False):
"""
Set all options to their default values and then, if a configuration
file has been provided, override the default values for those options
set in the file.
"""
cli_params = {k: v for k, v in locals().items()}
# Options set by the user, with default values
"""A dictionary whose keys are glottocodes or lowercase Glottolog clade names, and whose values are length-2 tuples of flatoing point dates (lower and upper bounds of 95% credible interval)."""
self.calibration_configs = {}
"""A list of `sections.Clock`s, each of which specifies the configuration for a single clock model."""
self.clocks = []
self.clocks_by_name = {}
"""An ordered dictionary whose keys are language group names and whose values are language group definitions."""
self.language_groups = {}
"""A dictionary giving names to arbitrary collections of tip languages."""
"""A list of dictionaries, each of which specifies the configuration for a single evolutionary model."""
self.models = []
self.stdin_data = stdin_data
"""A boolean value, controlling whether or not to read data from stdin as opposed to the file given in the config."""
# Glottolog data
self.glottolog_loaded = False
self.force_glottolog_load = force_glottolog_load
self.classifications = {}
self.glotto_macroareas = {}
self.locations = {}
# Options set from the command line interface
self.prior = prior
# Stuff we compute ourselves
self.processed = False
self._files_to_embed = []
# Now read the config ...
self.cfg = ConfigParser(interpolation=None)
self.cfg.optionxform = str
if configfile:
if isinstance(configfile, dict):
self.cfg.read_dict(configfile)
else:
if isinstance(configfile, str):
configfile = (configfile,)
self.cfg.read([str(c) for c in configfile])
# ... and process the sections:
# [geography]
if 'geography' in self.cfg.sections():
self.geography = sections.Geography.from_config(cli_params, 'geography', self.cfg)
else:
self.geography = None
# [calibration]
for clade, calibration in sections.Calibration.from_config(
{}, "calibration", self.cfg).options.items():
self.calibration_configs[clade] = calibration
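        # The raw calibration strings are stored as-is here and only parsed
        # into Calibration objects later by instantiate_calibrations(); e.g. a
        # hypothetical "austronesian = 4750 - 5800" line arrives here as
        # {'austronesian': '4750 - 5800'}.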
# [model ...] and [clock ...]
for prefix, cfg_cls in [('clock', sections.Clock), ('model', sections.Model)]:
for section in [s for s in self.cfg.sections() if s.lower().startswith(prefix)]:
getattr(self, prefix + 's').append(
cfg_cls.from_config({}, section, self.cfg))
# Make sure analysis is non-empty
if not (self.models or self.geography):
raise ValueError("Config file contains no model sections and no geography section.")
# [geo_priors]
if self.cfg.has_section("geo_priors"):
if not self.geography:
raise ValueError("Config file contains geo_priors section but no geography section.")
for clade, klm in sections.GeoPriors.from_config(
{}, 'geo_priors', self.cfg).iterpriors():
if clade not in self.geography.sampling_points:
self.geography.sampling_points.append(clade)
self.geography.priors[clade] = klm
# [admin]
self.admin = sections.Admin.from_config(cli_params, 'admin', self.cfg)
# [mcmc]
self.mcmc = sections.MCMC.from_config(
cli_params, 'mcmc' if self.cfg.has_section('mcmc') else 'MCMC', self.cfg)
# [languages]
self.languages = sections.Languages.from_config(cli_params, 'languages', self.cfg)
# [language_groups]
self.language_group_configs = sections.LanguageGroups.from_config(
{}, 'language_groups', self.cfg).options
# If log_every was not explicitly set to some non-zero
# value, then set it such that we expect 10,000 log
# entries
if not self.admin.log_every:
# If chainlength < 10000, this results in log_every = zero.
# This causes BEAST to die.
# So in this case, just log everything.
self.admin.log_every = self.mcmc.chainlength // 10000 or 1
if self.geography \
and [p for p in self.geography.sampling_points if p.lower() != "root"] \
and self.languages.sample_topology and not self.languages.monophyly:
log.warning(
"Geographic sampling and/or prior specified for clades other than root, but tree "
"topology is being sampled without monophyly constraints. BEAST may crash.")
def process(self):
"""
Prepares a Configuration object for being passed to the BeastXml
constructor.
This method checks the values of all options for invalid or ambiguous
settings, internal consistency, etc. Information is read from
external files as required. If this method returns without raising
any exceptions then this should function as a guarantee that a
BeastXml object can be instantiated from this Configuration with no
problems.
"""
if self.processed:
log.warning('Configuration has already been processed')
return
# Add dependency notices if required
if self.languages.monophyly and not self.languages.starting_tree:
log.dependency("ConstrainedRandomTree", "BEASTLabs")
if self.mcmc.path_sampling:
log.dependency("Path sampling", "MODEL_SELECTION")
self.load_glottolog_data()
self.load_user_geo()
self.instantiate_models()
self.build_language_filter()
self.process_models()
self.build_language_list()
self.define_language_groups()
self.handle_monophyly()
self.instantiate_calibrations()
# At this point, we can tell whether or not the tree's length units
# can be treated as arbitrary
self.arbitrary_tree = self.languages.sample_branch_lengths and not self.calibrations
# We also know what kind of tree prior we need to have –
# instantiate_calibrations may have changed the type if tip
# calibrations exist.
self.treeprior = {
"uniform": treepriors.UniformTree,
"yule": treepriors.YuleTree,
"birthdeath": treepriors.BirthDeathTree,
"coalescent": CoalescentTree
}[self.languages.tree_prior]()
# Now we can set the value of the ascertained attribute of each model
# Ideally this would happen during process_models, but this is impossible
# as set_ascertained() relies upon the value of arbitrary_tree defined above,
# which itself depends on process_models(). Ugly...
for m in self.models:
m.set_ascertained()
self.instantiate_clocks()
self.link_clocks_to_models()
self.processed = True
# Decide whether or not to log trees
if (
self.languages.starting_tree and
not self.languages.sample_topology and
not self.languages.sample_branch_lengths and
all([c.is_strict for c in self.clocks if c.is_used])
):
self.tree_logging_pointless = True
log.info(
"Tree logging disabled because starting tree is known and fixed and all clocks "
"are strict.")
else:
self.tree_logging_pointless = False
def define_language_groups(self):
"""Parse the [language_groups] section.
Every individual language is a language group of size one. Additional
groups can be specified as comma-separated lists of already-defined
groups. (This does of course include comma-separated lists of
languages, but definitions can be nested.)
TODO: In the future, the [languages] section should gain a property
such that language groups can be specified using external sources.
"""
self.language_groups = {language: {language} for language in self.languages.languages}
self.language_groups["root"] = set(self.languages.languages)
for name, specification in self.language_group_configs.items():
taxa = set()
for already_defined in specification:
taxa |= set(self.language_group(already_defined.strip()))
self.language_groups[name] = taxa
def load_glottolog_data(self):
"""
Loads the Glottolog classification information from the appropriate
        newick file, parses it and stores the required data structure in
        self.classifications.
"""
# Don't load if the analysis doesn't use it
if not self.check_glottolog_required():
return
# Don't load if we already have - can this really happen?
if self.glottolog_loaded:
log.warning('Glottolog data has already been loaded')
return
self.glottolog_loaded = True
self.classifications, glottocode2node, label2name = monophyly.classifications_from_newick(
str(get_glottolog_data('newick', self.admin.glottolog_release)))
# Load geographic metadata
dialects = []
for t in reader(
get_glottolog_data('geo', self.admin.glottolog_release), dicts=True):
            identifiers = [t['glottocode']] + t['isocodes'].split()
if t['level'] == "dialect":
dialects.append((t, identifiers))
if t['macroarea']:
for id_ in identifiers:
self.glotto_macroareas[id_] = t['macroarea']
if t['latitude'] and t['longitude']:
latlon = (float(t['latitude']), float(t['longitude']))
for id_ in identifiers:
self.locations[id_] = latlon
# Second pass of geographic data to handle dialects, which inherit
# their parent language's location
for t, identifiers in dialects:
failed = False
if t['glottocode'] not in glottocode2node: # pragma: no cover
# This may only happen for newick downloads of older Glottolog releases, where
# possibly isolates may not be included.
continue
node = glottocode2node[t['glottocode']]
ancestor = node.ancestor
while label2name[ancestor.name][1] not in self.locations:
if not ancestor.ancestor:
# We've hit the root without finding an ancestral node
# with location data!
failed = True
break
else:
ancestor = ancestor.ancestor
if failed:
continue
latlon = self.locations[label2name[ancestor.name][1]]
for id_ in identifiers:
self.locations[id_] = latlon
def check_glottolog_required(self):
# We need Glottolog if...
return (
# ...we've been given a list of families
self.languages.families
# ...we've been given a list of macroareas
or self.languages.macroareas
# ...we're using monophyly constraints
or self.languages.monophyly
# ...we're using calibrations (well, sometimes)
or self.calibration_configs
# ...we're using geography
or self.geography
# ...we've been forced to by greater powers (like the CLI)
or self.force_glottolog_load
)
def load_user_geo(self):
if self.geography:
# Read location data from file, patching (rather than replacing) Glottolog
for loc_file in self.geography.data:
self.locations.update(dict(iterlocations(loc_file)))
def build_language_filter(self):
"""
Examines the values of various options, including self.languages.languages and
self.languages.families, and constructs self.lang_filter.
self.lang_filter is a Set object containing all ISO and glotto codes
which are compatible with the provided settings (e.g. belong to the
requested families). This set is later used as a mask with data sets.
Datapoints with language identifiers not in this set will not be used
in an analysis.
"""
# Load requirements
if len(self.languages.families) == 1:
log.warning("value of 'families' has length 1: have you misspelled a filename?")
# Enforce minimum data constraint
all_langs = set(itertools.chain(*[model.data.keys() for model in self.models]))
N = sum([max([len(lang.keys()) for lang in model.data.values()]) for model in self.models])
datapoint_props = {}
for lang in all_langs:
count = 0
for model in self.models:
count += len([x for x in model.data[lang].values() if x])
datapoint_props[lang] = 1.0*count / N
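        # datapoint_props[lang] approximates the fraction of the analysis'
        # datapoints for which this language has a non-empty value; languages
        # falling below minimum_data are later excluded by filter_language().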
self.sparse_languages = [
l for l in all_langs if datapoint_props[l] < self.languages.minimum_data]
@property
def files_to_embed(self):
res = set(fname for fname in self._files_to_embed)
for section in [self.admin, self.mcmc, self.languages]:
res = res.union(section.files_to_embed)
return res
def filter_language(self, l):
if self.languages.languages and l not in self.languages.languages:
return False
if self.languages.families and not any(
name in self.languages.families or glottocode in self.languages.families
for (name, glottocode) in self.classifications.get(l,[])):
return False
if self.languages.macroareas and self.glotto_macroareas.get(l,None) not in self.languages.macroareas:
return False
if self.languages.exclusions and l in self.languages.exclusions:
return False
if l in self.sparse_languages:
return False
return True
def handle_monophyly(self):
"""
Construct a representation of the Glottolog monophyly constraints
for the languages in self.languages.languages. If the constraints are
meaningful, create and store a Newick tree representation of
them. If the constraints are not meaningful, e.g. all
languages are classified identically by Glottolog, then override
the monophyly=True setting.
"""
if (not self.languages.monophyly) or self.languages.monophyly_newick:
return
if len(self.languages.languages) < 3:
# Monophyly constraints are meaningless for so few languages
self.languages.monophyly = False
log.info(
"Disabling Glottolog monophyly constraints because there are only %d languages in "
"analysis." % len(self.languages.languages))
return
# Build a list-based representation of the Glottolog monophyly constraints
# This can be done in either a "top-down" or "bottom-up" way.
langs = [l for l in self.languages.languages if l.lower() in self.classifications]
if len(langs) != len(self.languages.languages):
# Warn the user that some taxa aren't in Glottolog and hence will be
# forced into an outgroup.
missing_langs = [l for l in self.languages.languages if l not in langs]
missing_langs.sort()
missing_str = ",".join(missing_langs[0:3])
missing_count = len(missing_langs)
if missing_count > 3:
missing_str += ",..."
log.warning(
"%d languages could not be found in Glottolog (%s). Monophyly constraints will "
"force them into an outgroup." % (missing_count, missing_str))
if self.languages.monophyly_end_depth is not None:
# A power user has explicitly provided start and end depths
start = self.languages.monophyly_start_depth
end = self.languages.monophyly_end_depth
elif self.languages.monophyly_direction == "top_down":
# Compute start and end in a top-down fashion
start = self.languages.monophyly_start_depth
end = start + self.languages.monophyly_levels
elif self.languages.monophyly_direction == "bottom_up":
# Compute start and end in a bottom-up fashion
classifications = [self.classifications[name.lower()] for name in langs]
end = max([len(c) for c in classifications]) - self.languages.monophyly_start_depth
start = max(0, end - self.languages.monophyly_levels)
struct = monophyly.make_structure(self.classifications, langs, depth=start, maxdepth=end)
# Make sure this struct is not pointlessly flat
if not monophyly.check_structure(struct):
self.languages.monophyly = False
log.info(
"Disabling Glottolog monophyly constraints because all languages in the analysis "
"are classified identically.")
# At this point everything looks good, so keep monophyly on and serialise the "monophyly structure" into a Newick tree.
self.languages.monophyly_newick = monophyly.make_newick(struct)
def instantiate_clocks(self):
"""
Populates self.clocks with a list of BaseClock subclasses, one for each
dictionary of settings in self.clock_configs.
"""
self.clocks = [clock.get_clock(self) for clock in self.clocks]
self.clocks_by_name = {clock.name: clock for clock in self.clocks}
if "default" not in self.clocks_by_name:
clock = sections.Clock(cli_params={}, name='clock default').get_clock(self)
self.clocks.append(clock)
self.clocks_by_name[clock.name] = clock
def instantiate_models(self):
"""
Populates self.models with a list of BaseModel subclasses, one for each
dictionary of settings in self.model_configs.
"""
# Handle request to read data from stdin
if self.stdin_data:
for config in self.models:
config["data"] = "stdin"
self.models = [model.get_model(self) for model in self.models]
if self.geography:
self.geo_model = geo.GeoModel(self.geography, self)
self.all_models = [self.geo_model] + self.models
else:
self.all_models = self.models
def process_models(self):
for model in self.models:
model.process()
def link_clocks_to_models(self):
"""
Ensures that for each model object in self.models, the attribute
"clock" is a reference to one of the clock objects in self.clocks.
Also determine which clock to estimate the mean of.
"""
for model in self.all_models:
if model.clock:
# User has explicitly specified a clock
if model.clock not in self.clocks_by_name:
raise ValueError("Unknown clock '%s' for model section '%s'." % (model.clock, model.name))
model.clock = self.clocks_by_name[model.clock]
elif model.name in self.clocks_by_name:
# Clock is associated by a common name
model.clock = self.clocks_by_name[model.name]
else:
# No clock specification - use default
model.clock = self.clocks_by_name["default"]
model.clock.is_used = True
# Disable pruned trees in models using RLCs
for model in self.models:
if model.pruned and isinstance(model.clock, random_clock.RandomLocalClock):
model.pruned = False
log.info(
"Disabling pruned trees because associated clock %s is a "
"RandomLocalClock. Pruned trees are currently only compatible with "
"StrictClocks and RelaxedClocks." % model.clock.name,
model=model)
# Warn user about unused clock(s) (but not the default clock)
for clock in self.clocks:
if clock.name != "default" and not clock.is_used:
log.info(
"Clock %s is not being used. Change its name to \"default\", or explicitly "
"associate it with a model." % clock.name)
# Remove unused clocks from the master clock list
self.clocks = [c for c in self.clocks if c.is_used]
# Get a list of model (i.e. non-geo) clocks for which the user has not
# indicated a preference on whether the mean should be estimated
free_clocks = list(set([m.clock for m in self.models
if m.clock.is_used
and m.clock.estimate_rate == None]))
if free_clocks:
# To begin with, estimate all free clocks
for clock in free_clocks:
clock.estimate_rate = True
# But if the tree is arbitrary, then fix one free clock, unless the
# user has fixed an un-free clock
if self.arbitrary_tree and all(
[m.clock.estimate_rate for m in self.models]):
free_clocks[0].estimate_rate = False
log.info(
"Clock \"%s\" has had it's mean rate fixed to 1.0. Tree branch lengths are in "
"units of expected substitutions for features in models using this "
"clock." % free_clocks[0].name)
# Determine whether or not precision-scaling is required
if self.geography:
self.geo_model.scale_precision = False
geo_clock = self.geo_model.clock
for m in self.models:
if m.clock == geo_clock:
log.warning(
"Geography model is sharing a clock with one or more data models. This may lead to a bad fit.")
self.geo_model.scale_precision = True
break
            # If geo has its own clock, estimate the mean
if not self.geo_model.scale_precision:
self.geo_model.clock.estimate_rate = True
def build_language_list(self):
"""
Combines the language sets of each model's data set, according to the
value of self.languages.overlap, to construct a final list of all the languages
in the analysis.
"""
if self.models:
self.languages.languages = set(self.models[0].data.keys())
else:
# There are no models
# So this must be a geography-only analysis
# Start with all languages in Glottolog, then apply filters
self.languages.languages = [l for l in self.classifications if self.filter_language(l)]
self.overlap_warning = False
for model in self.models:
addition = set(model.data.keys())
# If we're about to do a non-trivial union/intersect, alert the
# user.
if addition != self.languages.languages and not self.overlap_warning:
log.info(
"Not all data files have equal language sets. BEASTling will use the %s of all "
"language sets. Set the \"overlap\" option in [languages] to change "
"this." % self.languages.overlap)
self.overlap_warning = True
self.languages.languages = getattr(set, self.languages.overlap)(
self.languages.languages, addition)
## Make sure there's *something* left
if not self.languages.languages:
raise ValueError("No languages specified!")
## Convert back into a sorted list
self.languages.languages = sorted(self.languages.languages)
## Perform subsampling, if requested
self.languages.languages = sorted(self.subsample_languages(self.languages.languages))
log.info("{:d} languages included in analysis: {:}".format(
len(self.languages.languages), self.languages.languages))
## SPREAD THE WORD!
for m in self.models:
m.languages = [l for l in m.languages if l in self.languages.languages]
self.languages.sanitise_trees()
def subsample_languages(self, languages):
"""
Return a random subsample of languages with a specified size
"""
if not self.languages.subsample_size:
return languages
if self.languages.subsample_size > len(languages):
log.info(
"Requested subsample size is %d, but only %d languages to work with! Disabling "
"subsampling." % (self.languages.subsample_size, len(languages)))
return languages
# Seed PRNG with sorted language names
# Python will convert to an integer hash
# This means we always take the same subsample for a particular
# initial language set.
log.info("Subsampling %d languages down to %d." % (
len(languages), self.languages.subsample_size))
random.seed(",".join(sorted(languages)))
return random.sample(languages, self.languages.subsample_size)
def language_group(self, clade):
"""Look up a language group locally or as a glottolog clade."""
if clade not in self.language_groups:
self.language_groups[clade] = self.get_languages_by_glottolog_clade(clade)
if not self.language_groups[clade]:
raise ValueError(
"Language group or Glottolog clade {:} not found "
"or was empty for the languages given.".format(clade))
return self.language_groups[clade]
def instantiate_calibrations(self):
self.calibrations = {}
""" Calibration distributions for calibrated clades """
self.tip_calibrations = {}
""" Starting heights for calibrated tips """
useless_calibrations = []
for clade, cs in self.calibration_configs.items():
orig_clade = clade[:]
originate = False
is_tip_calibration = False
# Parse the clade identifier
# First check for originate()
if clade.lower().startswith("originate(") and clade.endswith(")"):
originate = True
clade = clade[10:-1]
# The clade is specified as a language_group, either
# explicitly defined or the builtin "root" or a Glottolog
# identifier
langs = self.language_group(clade)
if langs == self.language_groups["root"] and originate:
raise ValueError("Root has no ancestor, but originate(root) was given a calibration.")
# Figure out what kind of calibration this is and whether it's valid
if len(langs) > 1:
## Calibrations on multiple taxa are always valid
pass
elif not langs: # pragma: no cover
# Calibrations on zero taxa are never valid, so abort
# and skip to the next cal. This should never happen,
# because empty calibrations can only be specified by
# empty language groups, which should be caught before
# this.
log.info("Calibration on clade '%s' ignored as no matching languages in analysis." % clade)
continue
# At this point we know that len(langs) == 1, so that condition is
# implicit in the conditions for all the branches below
elif originate:
## Originate calibrations on single taxa are always valid
pass
elif "," not in clade and clade in self.languages.languages:
## This looks like a tip calibration, i.e. the user has specified
## only one identifier, not a comma-separated list, and that
## identifier matches a language, not a Glottolog family that we
## happen to only have one language for
log.info("Calibration on '%s' taken as tip age calibration." % clade)
is_tip_calibration = True
self.languages.tree_prior = "coalescent"
else: # pragma: no cover
# At this point we have a non-originate calibration on
# a single taxa, which is not the result of
# specifically asking for only this taxa. Probably the
# user did not expect to get here. They might want
# this to be an originate cal, or a tip cal, but we
# can't tell with what we know and shouldn't
# guess. Abort and skip to the next cal. This should
# never happen, because empty calibrations can only be
# specified by empty language groups, which should be
# caught before this.
log.info(
"Calibration on clade '%s' matches only one language. Ignoring due to "
"ambiguity. Use 'originate(%s)' if this was supposed to be an originate "
"calibration, or explicitly identify the single language using '%s' if this "
"was supposed to be a tip calibration." % (clade, clade, langs[0]))
continue
# Make sure this calibration point, which will induce a monophyly
# constraint, does not conflict with the overall monophyly
# constraints from Glottolog or a user-tree
if self.languages.monophyly and len(langs) > 1:
mono_tree = newick.loads(self.languages.monophyly_newick)[0]
cal_clade = set(langs)
for node in mono_tree.walk():
mono_clade = set(node.get_leaf_names())
# If the calibration clade is not a subset of this monophyly clade, keep searching
if not cal_clade.issubset(mono_clade):
continue
# At this point, we can take it for granted the cal clade is a subset of the mono_clade
# We are happy if the calibration clade is exactly this monophyly clade
if mono_clade == cal_clade:
break
# We are also happy if this mono_clade is a "terminal clade", i.e. has no finer structure
# which the calibration clade may violate
elif all((child.is_leaf for child in node.descendants)):
break
# We are also happy if the calibration clade is a union of descendant mono clades
elif all(set(child.get_leaf_names()).issubset(cal_clade) or len(set(child.get_leaf_names()).intersection(cal_clade)) == 0 for child in node.descendants):
break
else:
# If we didn't break out of this loop, then the languages
# in this calibration do not constitute a clade of the
# monophyly tree
raise ValueError("Calibration on for clade %s violates a monophyly constraint!" % (clade))
# Next parse the calibration string and build a Calibration object
cal_obj = Calibration.from_string(
string=cs,
context="calibration of clade {:}".format(orig_clade),
is_point=is_tip_calibration,
langs=langs,
originate=originate)
# Choose a name
if originate:
clade_identifier = "%s_originate" % clade
elif is_tip_calibration:
clade_identifier = "%s_tip" % clade
else:
clade_identifier = clade
# Store the Calibration object under the chosen name
if is_tip_calibration:
self.tip_calibrations[clade_identifier] = cal_obj
else:
self.calibrations[clade_identifier] = cal_obj
def get_languages_by_glottolog_clade(self, clade):
"""
Given a comma-separated list of Glottolog ids, return a list of all
languages descended from the corresponding Glottolog nodes.
"""
clades = set(c.strip() for c in clade.split(","))
# First look for clades which are actually language identifiers
langs = matched_clades = clades.intersection(self.languages.languages)
# Once a clade has matched against a language name, don't let it
# subsequently match against anything in Glottolog!
clades = clades - matched_clades
if clades:
# Now search against Glottolog
clades = [c.lower() for c in clades]
for l in self.languages.languages:
# No repeated matching!
if l not in langs:
for name, glottocode in self.classifications.get(l.lower(), ""):
if name.lower() in clades or glottocode in clades:
langs.add(l)
break
return list(langs)
| bsd-2-clause | -4,280,757,378,011,558,400 | 45.074742 | 201 | 0.601052 | false |
3dfxmadscientist/odoo-infrastructure | addons/infrastructure/environment_step.py | 1 | 1539 | # -*- coding: utf-8 -*-
##############################################################################
#
# Infrastructure
# Copyright (C) 2014 Ingenieria ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class environment_step(osv.osv):
""""""
_name = 'infrastructure.environment_step'
_description = 'environment_step'
_columns = {
'name': fields.char(string='Name', required=True),
'environment_version_id': fields.many2one('infrastructure.environment_version', string='Environment Version', ondelete='cascade', required=True),
}
_defaults = {
}
_constraints = [
]
environment_step()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 237,234,166,110,134,460 | 29.176471 | 154 | 0.621832 | false |
girving/tensorflow | tensorflow/python/kernel_tests/rnn_test.py | 1 | 44455 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops.losses import losses
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import training
class Plus1RNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def call(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def call(self, input_, state, scope=None):
return (input_, state + 1)
class UnbalancedOutputRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return tensor_shape.TensorShape(1), tensor_shape.TensorShape((2))
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def call(self, input_, state, scope=None):
concatenated = array_ops.concat((input_, input_), axis=-1)
return (input_, concatenated), state + 1
class TensorArrayStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell its state as a TensorArray."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return (tensor_shape.TensorShape([]), ())
def zero_state(self, batch_size, dtype):
return (array_ops.zeros([], dtype=dtypes.int32),
tensor_array_ops.TensorArray(
dtype=dtype, size=0, dynamic_size=True))
def call(self, input_, state, scope=None):
new_array = state[1].write(state[0], input_)
return (input_, (state[0] + 1, new_array))
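# The toy cells above exist only to exercise dynamic_rnn with unusual output
# and state structures (scalar states, tuple outputs, TensorArray state).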
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_in_graph_and_eager_modes
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
if context.executing_eagerly():
inputs = [constant_op.constant(np.ones((3, 4)))]
else:
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.dynamic_rnn(
cell,
array_ops.stack(inputs),
dtype=dtypes.float32,
sequence_length=[[4]])
@test_util.run_in_graph_and_eager_modes
def testBatchSizeFromInput(self):
cell = Plus1RNNCell()
in_eager_mode = context.executing_eagerly()
# With static batch size
if in_eager_mode:
inputs = np.zeros((3, 4, 5), dtype=np.float32)
initial_state = np.zeros((3, 5), dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
initial_state = array_ops.placeholder(dtypes.float32, shape=(3, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell, inputs, initial_state=initial_state)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# Without static batch size
# Tensor shapes are fully determined with eager execution enabled,
# so only run this test for graph construction.
if not in_eager_mode:
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(None, outputs.shape[0].value)
self.assertEqual(None, state.shape[0].value)
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell,
inputs,
initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
self.assertEqual(None, outputs.shape[0].value)
self.assertEqual(None, state.shape[0].value)
@test_util.run_in_graph_and_eager_modes
def testScalarStateIsAccepted(self):
cell = ScalarStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state)
@test_util.run_in_graph_and_eager_modes
def testUnbalancedOutputIsAccepted(self):
cell = UnbalancedOutputRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertIsInstance(outputs, tuple)
self.assertAllEqual([[[1], [2], [3], [4]]], outputs[0])
self.assertAllEqual([[[1, 1], [2, 2], [3, 3], [4, 4]]], outputs[1])
self.assertAllEqual(4, state)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testEagerMemory(self):
with context.eager_mode():
cell = TensorArrayStateRNNCell()
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=[4])
@test_util.run_in_graph_and_eager_modes
def testTensorArrayStateIsAccepted(self):
cell = TensorArrayStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session(use_gpu=True) as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
state = (state[0], state[1].stack())
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={
inputs: [[[1], [2], [3], [4]]]
})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state[0])
self.assertAllEqual([[[1]], [[2]], [[3]], [[4]]], state[1])
def testCellGetInitialState(self):
cell = rnn_cell_impl.BasicRNNCell(5)
with self.assertRaisesRegexp(
ValueError, "batch_size and dtype cannot be None"):
cell.get_initial_state(None, None, None)
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 1))
with self.assertRaisesRegexp(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=50, dtype=None)
with self.assertRaisesRegexp(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(
inputs=inputs, batch_size=constant_op.constant(50), dtype=None)
with self.assertRaisesRegexp(
ValueError, "dtype from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=None, dtype=dtypes.int16)
initial_state = cell.get_initial_state(
inputs=inputs, batch_size=None, dtype=None)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
batch = array_ops.shape(inputs)[0]
dtype = inputs.dtype
initial_state = cell.get_initial_state(None, batch, dtype)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size,
out_size):
cell = cell_class(out_size, dtype=dtype)
in_shape = tensor_shape.TensorShape((batch_size, in_size))
cell.build(in_shape)
state_output = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())
@test_util.run_in_graph_and_eager_modes
def testCellsBuild(self):
f32 = dtypes.float32
f64 = dtypes.float64
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f64, 5, 7, 3)
self._assert_cell_builds(contrib_rnn.IndRNNCell, f32, 5, 7, 3)
self._assert_cell_builds(contrib_rnn.IndRNNCell, f64, 5, 7, 3)
self._assert_cell_builds(contrib_rnn.IndyGRUCell, f32, 5, 7, 3)
self._assert_cell_builds(contrib_rnn.IndyGRUCell, f64, 5, 7, 3)
self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(contrib_rnn.IndyLSTMCell, f64, 5, 7, 3)
def testRNNWithKerasSimpleRNNCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.SimpleRNNCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), batch)
def testRNNWithKerasGRUCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.GRUCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), batch)
def testRNNWithKerasLSTMCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.LSTMCell(output_shape)
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(len(state), 2)
self.assertEqual(state[0].shape.as_list(), [None, output_shape])
self.assertEqual(state[1].shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state[0])
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), 2)
self.assertEqual(len(state[0]), batch)
self.assertEqual(len(state[1]), batch)
def testRNNWithStackKerasCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.StackedRNNCells(
[keras.layers.LSTMCell(2 * output_shape),
keras.layers.LSTMCell(output_shape)])
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
self.assertEqual(len(state), 4)
self.assertEqual(state[0].shape.as_list(), [None, 2 * output_shape])
self.assertEqual(state[1].shape.as_list(), [None, 2 * output_shape])
self.assertEqual(state[2].shape.as_list(), [None, output_shape])
self.assertEqual(state[3].shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state[2])
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
_, outputs, state = sess.run(
[train_op, outputs, state], {inputs: x_train, predict: y_train})
self.assertEqual(len(outputs), batch)
self.assertEqual(len(state), 4)
for s in state:
self.assertEqual(len(s), batch)
def testStaticRNNWithKerasSimpleRNNCell(self):
with self.cached_session() as sess:
input_shape = 10
output_shape = 5
timestep = 4
batch = 100
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
x_train = np.transpose(x_train, (1, 0, 2))
y_train = keras.utils.to_categorical(y_train)
cell = keras.layers.SimpleRNNCell(output_shape)
inputs = [array_ops.placeholder(
dtypes.float32, shape=(None, input_shape))] * timestep
predict = array_ops.placeholder(
dtypes.float32, shape=(None, output_shape))
outputs, state = rnn.static_rnn(
cell, inputs, dtype=dtypes.float32)
self.assertEqual(len(outputs), timestep)
self.assertEqual(outputs[0].shape.as_list(), [None, output_shape])
self.assertEqual(state.shape.as_list(), [None, output_shape])
loss = losses.softmax_cross_entropy(predict, state)
train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
sess.run([variables_lib.global_variables_initializer()])
feed_dict = {i: d for i, d in zip(inputs, x_train)}
feed_dict[predict] = y_train
_, outputs, state = sess.run(
[train_op, outputs, state], feed_dict)
self.assertEqual(len(outputs), timestep)
self.assertEqual(len(outputs[0]), batch)
self.assertEqual(len(state), batch)
def testKerasAndTFRNNLayerOutputComparison(self):
input_shape = 10
output_shape = 5
timestep = 4
batch = 20
(x_train, _), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
fix_weights_generator.build((None, input_shape))
weights = fix_weights_generator.get_weights()
with self.session(graph=ops_lib.Graph()) as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
cell = keras.layers.SimpleRNNCell(output_shape)
tf_out, tf_state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
cell.set_weights(weights)
[tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
with self.session(graph=ops_lib.Graph()) as sess:
k_input = keras.Input(shape=(timestep, input_shape),
dtype=dtypes.float32)
cell = keras.layers.SimpleRNNCell(output_shape)
layer = keras.layers.RNN(cell, return_sequences=True, return_state=True)
keras_out = layer(k_input)
cell.set_weights(weights)
k_out, k_state = sess.run(keras_out, {k_input: x_train})
self.assertAllClose(tf_out, k_out)
self.assertAllClose(tf_state, k_state)
def testSimpleRNNCellAndBasicRNNCellComparison(self):
input_shape = 10
output_shape = 5
timestep = 4
batch = 20
(x_train, _), _ = testing_utils.get_test_data(
train_samples=batch,
test_samples=0,
input_shape=(timestep, input_shape),
num_classes=output_shape)
fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
fix_weights_generator.build((None, input_shape))
# The SimpleRNNCell contains 3 weights: kernel, recurrent_kernel, and bias
    # The BasicRNNCell contains 2 weights: kernel and bias, where kernel is
# zipped [kernel, recurrent_kernel] in SimpleRNNCell.
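    # Concatenating [kernel, recurrent_kernel] along axis 0 therefore yields a
    # single kernel of shape (input_size + num_units, num_units), which matches
    # the single kernel variable that BasicRNNCell builds.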
keras_weights = fix_weights_generator.get_weights()
kernel, recurrent_kernel, bias = keras_weights
tf_weights = [np.concatenate((kernel, recurrent_kernel)), bias]
with self.test_session(graph=ops_lib.Graph()) as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
cell = keras.layers.SimpleRNNCell(output_shape)
k_out, k_state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
cell.set_weights(keras_weights)
[k_out, k_state] = sess.run([k_out, k_state], {inputs: x_train})
with self.test_session(graph=ops_lib.Graph()) as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(None, timestep, input_shape))
cell = rnn_cell_impl.BasicRNNCell(output_shape)
tf_out, tf_state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32)
cell.set_weights(tf_weights)
[tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
self.assertAllClose(tf_out, k_out, atol=1e-5)
self.assertAllClose(tf_state, k_state, atol=1e-5)
def testBasicLSTMCellInterchangeWithLSTMCell(self):
with self.session(graph=ops_lib.Graph()) as sess:
basic_cell = rnn_cell_impl.BasicLSTMCell(1)
basic_cell(array_ops.ones([1, 1]),
state=basic_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in basic_cell.variables])
self.evaluate(basic_cell._bias.assign([10.] * 4))
save = saver.Saver()
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = save.save(sess, prefix)
with self.session(graph=ops_lib.Graph()) as sess:
lstm_cell = rnn_cell_impl.LSTMCell(1, name="basic_lstm_cell")
lstm_cell(array_ops.ones([1, 1]),
state=lstm_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in lstm_cell.variables])
save = saver.Saver()
save.restore(sess, save_path)
self.assertAllEqual([10.] * 4, self.evaluate(lstm_cell._bias))
def testRNNCellSerialization(self):
for cell in [
rnn_cell_impl.LSTMCell(32, use_peepholes=True, cell_clip=True),
rnn_cell_impl.BasicLSTMCell(32, dtype=dtypes.float32),
rnn_cell_impl.BasicRNNCell(32, activation="relu", dtype=dtypes.float32),
rnn_cell_impl.GRUCell(
32, kernel_initializer="ones", dtype=dtypes.float32)
]:
with self.cached_session():
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer="rmsprop", loss="mse")
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
# The custom_objects is important here since rnn_cell_impl is
# not visible as a Keras layer, and also has a name conflict with
# keras.LSTMCell and GRUCell.
layer = keras.layers.RNN.from_config(
config,
custom_objects={
"BasicRNNCell": rnn_cell_impl.BasicRNNCell,
"GRUCell": rnn_cell_impl.GRUCell,
"LSTMCell": rnn_cell_impl.LSTMCell,
"BasicLSTMCell": rnn_cell_impl.BasicLSTMCell
})
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
self.assertAllClose(y_np, y_np_2, atol=1e-4)
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
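  # Group the forward outputs, the final state and all gradients into a single
  # op so that one Session.run of the returned op exercises both the forward
  # and backward passes when timing.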
return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
_static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)
def _create_dynamic_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
_static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _timer(sess, ops):
  # Warm up
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Halve the sequence length, full static unroll
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + final_state,
trainable_variables)
return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Run with concatenated states (default)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell,
inputs_t,
sequence_length=sequence_length,
swap_memory=swap_memory,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
swap_memory, nn):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(nn):
if dynamic:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
dynamic, elapsed,
elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(
name="graph_creation_time_static_T%02d" % max_time,
iters=5,
wall_time=s_dt)
self.report_benchmark(
name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5,
wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
for batch_size in (
16,
128,):
for max_time in (50,):
for num_units in (
16,
128,):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=t_dt)
def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
"""The memory swapping test for the SOSP submission."""
print("Calculation: Long LSTM Sequence")
print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
batch_size = 512
seqlen = 800
num_units = 512
dynamic = True
swap_memory = True
# Some warming up.
if swap_memory:
rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory, 2)
# Measure the performance.
for slen in xrange(100, 1100, 100):
rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
swap_memory, 3)
if __name__ == "__main__":
test.main()
| apache-2.0 | 920,542,612,437,591,800 | 38.340708 | 80 | 0.6303 | false |
michaelpantic/tolScreenCleaner | parse.py | 1 | 1448 | import csv
import re
import parseH
import parseL
import outputAdapterTable
import outputAdapterODMax
from os import listdir
from os.path import isfile, join
indexFolder = "../tolScreenCleanerData/INDEX"
platesFolder = "../tolScreenCleanerData/PLATES"
outputFolder = "../tolScreenCleanerData/OUTPUT"
# Get available index files
indexFilesH = [ join(indexFolder,f) for f in listdir(indexFolder) if isfile(join(indexFolder,f)) and re.match("H.+.txt",f)]
indexFilesL = [ join(indexFolder,f) for f in listdir(indexFolder) if isfile(join(indexFolder,f)) and re.match("L\d+_STRAIN.txt",f)]
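# H-type index files are named like "H*.txt", L-type index files like
# "L<digits>_STRAIN.txt"; the MachineType dispatch below relies on this split.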
# Parse all Indexes
plates =[];
for fileL in indexFilesL:
plates.append(parseL.parseIndex(fileL));
for fileH in indexFilesH:
plates.append(parseH.parseIndex(fileH));
print('Found ' + str(len(plates)) + ' different plates:')
for plate in plates:
print ("\t",plate)
outputInfoTable = outputAdapterTable.createOutputFile(outputFolder);
outputInfoODMax = outputAdapterODMax.createOutputFile(outputFolder);
# go through found plates
for plate in plates:
if plate.MachineType=='H':
parseH.parsePlateData(plate, platesFolder)
elif plate.MachineType=='L':
parseL.parsePlateData(plate, platesFolder)
else:
raise NameError("Unknown plate type")
outputAdapterTable.outputPlateData(plate,outputInfoTable);
outputAdapterODMax.outputPlateData(plate,outputInfoODMax);
outputAdapterTable.finish(outputInfoTable)
outputAdapterODMax.finish(outputInfoODMax)
| gpl-2.0 | 781,799,530,293,800,100 | 28.55102 | 131 | 0.785221 | false |
shashankg7/pynet | tests/test_tree.py | 1 | 1404 | from pynet.net import tree
from lxml import etree
import unittest
#TODO create multiple trees and their answer.
s1 = """<root>
<b>
<d></d>
<e></e>
</b>
<c>
<f></f>
</c>
<b>
<e></e>
<d></d>
</b>
<c>
<g>
<h></h>
<i></i>
<j></j>
</g>
</c>
</root>
"""
s2 = """<root>
<b>
<d></d>
<e></e>
</b>
<c>
<g>
<h></h>
</g>
<f></f>
</c>
</root>
"""
ans12 = ["root", "b", "d", "e", "c", "f", "g", "h"]
# Space between only open-closed tag. #readability
s3 = """<c><b><a> </a></b></c>"""
s4 = """<c><a> </a><b> </b></c> """
ans34 = ["c", "a"]
ans13 = []
ans14 = []
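# The ansNM lists hold the expected matched-tag sequences for matching tree sN
# against tree sM; only the s1/s2 pair is exercised by the test below (see the
# TODO above).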
class TestTreeMatch(unittest.TestCase):
def test_tree_match(self):
tree1 = etree.XML(s1)
tree2 = etree.XML(s2)
t = tree()
ans = ["root", "b", "d", "e", "c", "f", "g", "h"]
ret = []
        self.assertEqual(7, t.tree_match(tree1, tree2, ret))
for tags, a in zip(ret, ans):
self.assertEqual( tags[0].tag, a)
if __name__ == "__main__":
unittest.main()
| mit | -1,266,251,149,165,280,800 | 21.285714 | 61 | 0.321937 | false |
ewheeler/rapidsms-timelines | timelines/handlers/base.py | 1 | 1997 | from __future__ import unicode_literals
import re
from django.utils.translation import ugettext_lazy as _
from django.utils import formats
from rapidsms.contrib.handlers.handlers.keyword import KeywordHandler
class AppointmentHandler(KeywordHandler):
"Base keyword handler for the APPT prefix."
prefix = 'APPT|REPORT|REP'
form = None
success_text = ''
@classmethod
def _keyword(cls):
if hasattr(cls, "keyword"):
pattern = r"^\s*(?:%s)\s*(?:%s)(?:[\s,;:]+(.+))?$"\
% (cls.prefix, cls.keyword)
else:
pattern = r"^\s*(?:%s)\s*?$" % cls.prefix
return re.compile(pattern, re.IGNORECASE)
def handle(self, text):
"Parse text, validate data, and respond."
parsed = self.parse_message(text)
form = self.form(data=parsed, connection=self.msg.connection)
if form.is_valid():
params = form.save()
if 'date' in params:
params['date'] = formats.date_format(params['date'],
'SHORT_DATE_FORMAT')
self.respond(self.success_text % params)
else:
error = form.error()
if error is None:
self.unknown()
else:
self.respond(error)
return True
def help(self):
"Return help mesage."
if self.help_text:
keyword = self.keyword.split('|')[0].upper()
help_text = self.help_text % {'prefix': self.prefix,
'keyword': keyword}
self.respond(help_text)
def unknown(self):
"Common fallback for unknown errors."
keyword = self.keyword.split('|')[0].upper()
params = {'prefix': self.prefix, 'keyword': keyword}
self.respond(_('Sorry, we cannot understand that message. '
'For additional help send: %(prefix)s %(keyword)s')
% params)
| bsd-3-clause | 5,640,505,439,613,818,000 | 32.283333 | 74 | 0.536805 | false |
cjforman/pele | pele/landscape/connect_min.py | 1 | 22807 | import logging
import numpy as np
import networkx as nx
from pele.landscape import TSGraph, LocalConnect
from pele.landscape._distance_graph import _DistanceGraph
__all__ = ["DoubleEndedConnect"]
logger = logging.getLogger("pele.connect")
class DoubleEndedConnect(object):
"""
Find a connected network of minima and transition states between min1 and min2
Parameters
----------
min1, min2 : Mimumum() objects
the two minima to try to connect
pot : potential object
the potential
mindist : callable
the function which returns the optimized minimum distance between
two structures
database : pele Database object
Used to store the new minima and transition states found.
niter : int, optional
maximum number of iterations
verbosity : int
this controls how many status messages are printed. (not really
implemented yet)
merge_minima : bool
if True, minima for which NEB finds no transition state candidates
between them will be merged
max_dist_merge : float
merging minima will be aborted if the distance between them is greater
than max_dist_merge
local_connect_params : dict
parameters passed to the local connect algorithm. This includes all
NEB and all transition state search parameters, along with, e.g.
        how many times to retry a local connect run. See documentation for
LocalConnect for details.
fresh_connect : bool
if true, ignore all existing minima and transition states in the
database and try to find a new path
longest_first : bool
if true, always try to connect the longest segment in the path guess
first
conf_checks : list of callables
a list of callable function that determine if a configuration is valid.
They must return a bool, and accept the keyword parameters
conf_check(energy=energy, coords=coords)
If any configuration in a minimum-transition_state-minimum triplet fails
a test then the whole triplet is rejected.
Notes
-----
The algorithm is iterative, with each iteration composed of
While min1 and min2 are not connected:
1) choose a pair of known minima to try to connect
2) use NEB to get a guess for the transition states between them
3) refine the transition states to desired accuracy
4) fall off either side of the transition states to find the two
minima associated with that candidate
5) add the transition states and associated minima to the known
network
    Of the above, steps 1, 2 and 3 are the most involved. Steps 2, 3 and 4 are
wrapped into a separate class called LocalConnect. See this class and
the NEB and FindTransitionState classes for detailed descriptions of
these procedures.
An important note is that the NEB is used only to get a *guess* for the
transition state. Thus we only want to put enough time and energy into
the NEB routine to get the guess close enough that FindTransitionState
can refine it to the correct transition state. FindTransitionState is
very fast if the initial guess is good, but can be very slow otherwise.
Choose a pair:
Here I will describe step 1), the algorithm to find a pair of known
minima to try to connect. This choice will keep in mind that the
ultimate goal is to connect min1 and min2.
In addition to the input parameter "graph", we keep a second graph
"Gdist" (now wrapped in a separate class _DistanceGraph) which also has
minima as the vertices. Gdist has an edge between every pair of nodes.
The edge weight between vertices u and v
is
if u and v are connected by transition states:
weight(u, v) = 0.
elif we have already tried local_connect on (u,v):
weight(u, v) = Infinity
else:
weight(u, v) = dist(u, v)**2
This edge weight is set to Infinity to ensure we don't repeat
LocalConnect runs over and over
again. The minimum weight path between min1 and min2 in Gdist gives a
good guess for the best way to try connect min1 and min2. So the
algorithm to find a pair of know minima (trial1, trial2) to try to
connect is
path = Gdist.minimum_weight_path(min1, min2)
        trial1, trial2 = minima pair in path with lowest nonzero edge weight. (note:
        if parameter longest_first is True then the edge pair with the largest
        edge weight will be selected)
todo:
allow user to pass graph
See Also
--------
LocalConnect : the core algorithm of this routine
"""
def __init__(self, min1, min2, pot, mindist, database,
verbosity=1,
merge_minima=False,
max_dist_merge=0.1, local_connect_params=None,
fresh_connect=False, longest_first=True,
niter=200, conf_checks=None
):
self.minstart = min1
assert min1.id() == min1, "minima must compare equal with their id %d %s %s" % (
min1.id(), str(min1), str(min1.__hash__()))
self.minend = min2
self.pot = pot
self.mindist = mindist
self.pairsNEB = dict()
self.longest_first = longest_first
self.niter = niter
if conf_checks is None:
self.conf_checks = []
else:
self.conf_checks = conf_checks
self.verbosity = int(verbosity)
if local_connect_params is None:
local_connect_params = dict()
self.local_connect_params = dict([("verbosity", verbosity)] + local_connect_params.items())
self.database = database
self.fresh_connect = fresh_connect
if self.fresh_connect:
self.graph = TSGraph(self.database, minima=[self.minstart, self.minend], no_edges=True)
else:
self.graph = TSGraph(self.database)
self.merge_minima = merge_minima
self.max_dist_merge = float(max_dist_merge)
self.dist_graph = _DistanceGraph(self.database, self.graph, self.mindist, self.verbosity)
# check if a connection exists before initializing distance graph
if self.graph.areConnected(self.minstart, self.minend):
logger.info("minima are already connected. not initializing distance graph")
return
self.dist_graph.initialize(self.minstart, self.minend)
if self.verbosity > 0:
logger.info("************************************************************")
logger.info("starting a double ended connect run between")
logger.info(" minimum 1: id %d energy %f" % (self.minstart.id(), self.minstart.energy))
logger.info(" minimum 2: id %d energy %f" % (self.minend.id(), self.minend.energy))
logger.info(" dist %f" % self.getDist(self.minstart, self.minend))
logger.info("************************************************************")
def mergeMinima(self, min1, min2):
"""merge two minimum objects
This will delete min2 and make everything that
pointed to min2 point to min1.
"""
        # prefer to delete the minimum with the larger id. this potentially will be easier
if min2.id() < min1.id():
min1, min2 = min2, min1
debug = False
dist = self.getDist(min1, min2)
logger.info("merging minima %s %s %s %s %s", min1.id(), min2.id(), dist, "E1-E2", min1.energy - min2.energy)
# deal with the case where min1 and/or min2 are the same as minstart and/or minend
# make sure the one that is deleted (min2) is not minstart or minend
if ((min1 == self.minstart and min2 == self.minend) or
(min2 == self.minstart and min1 == self.minend)):
logger.error("ERROR: trying to merge the start and end minima. aborting")
return
if min2 == self.minstart or min2 == self.minend:
min1, min2 = min2, min1
if dist > self.max_dist_merge:
logger.info(" minima merge aborted. distance is too large %s", dist)
return
# merge minima in transition state graph
self.graph.mergeMinima(min1, min2)
# merge minima in the database also
self.database.mergeMinima(min1, min2)
if debug:
# testing
if min2 in self.graph.graph.nodes():
logger.error("error, min2 is still in self.graph.graph")
logger.debug("self.graph.graph. nnodes %s", self.graph.graph.number_of_nodes())
# merge minima in distance graph
self.dist_graph.mergeMinima(min1, min2)
def getDist(self, min1, min2):
"""
get the distance between min1 and min2.
"""
return self.dist_graph.getDist(min1, min2)
def _addTransitionState(self, ts_ret, min_ret1, min_ret2):
"""
add a transition state to the database, the transition state graph and
the distance graph
"""
# if isinstance(min_ret1, tuple): # for compatability with old and new quenchers
# min_ret1 = min_ret1[4]
# if isinstance(min_ret2, tuple): # for compatability with old and new quenchers
# min_ret2 = min_ret2[4]
# if isinstance(min1_ret)
# sanity check for the energies
me1, me2 = min_ret1.energy, min_ret2.energy
if ts_ret.energy < me1 or ts_ret.energy < me2:
logger.warning("trying to add a transition state that has energy lower than its minima.")
logger.warning(" TS energy %s %s %s %s", ts_ret.energy, "minima energy", me1, me2)
logger.warning(" aborting")
return False
# check the minima and transition states are valid configurations.
# if any fail, then don't add anything.
configs_ok = True
for ret in [min_ret1, min_ret2, ts_ret]:
for check in self.conf_checks:
if not check(energy=ret.energy, coords=ret.coords):
configs_ok = False
break
if not configs_ok:
break
if not configs_ok:
return False
# Add the minima to the database
min1 = self.database.addMinimum(min_ret1.energy, min_ret1.coords)
min2 = self.database.addMinimum(min_ret2.energy, min_ret2.coords)
# Add the minima to the transition state graph.
self.graph.addMinimum(min1)
self.graph.addMinimum(min2)
if min1 == min2:
logger.warning("stepping off the transition state resulted in twice the same minima %s", min1.id())
return False
logger.info("adding transition state %s %s", min1.id(), min2.id())
# add the transition state to the database
ts = self.database.addTransitionState(ts_ret.energy, ts_ret.coords, min1, min2,
eigenvec=ts_ret.eigenvec, eigenval=ts_ret.eigenval)
# update the transition state graph
self.graph.addTransitionState(ts)
# self.graph.refresh()
# update the distance graph
self.dist_graph.addMinimum(min1)
self.dist_graph.addMinimum(min2)
self.dist_graph.setTransitionStateConnection(min1, min2)
if self.verbosity > 1:
# print some information
dse = self.getDist(self.minend, self.minstart)
msid = self.minstart.id()
meid = self.minend.id()
m1id = min1.id()
m2id = min2.id()
if min1 != self.minstart and min1 != self.minend:
ds = self.getDist(min1, self.minstart)
de = self.getDist(min1, self.minend)
if ds < dse > de:
triangle = ""
else:
triangle = ": new minima not in between start and end"
logger.info(" distances: %4d -> %4d = %f %4d -> %4d = %f %4d -> %4d = %f %s" %
(msid, m1id, ds, m1id, meid, de, m1id, m2id, dse, triangle))
if min2 != self.minstart and min2 != self.minend:
ds = self.getDist(min2, self.minstart)
de = self.getDist(min2, self.minend)
# if ds < dse > de:
# triangle = ""
# else:
# triangle = ": new minima not in between start and end"
logger.info(" distances: %4d -> %4d = %f %4d -> %4d = %f %4d -> %4d = %f" %
(msid, m2id, ds, m2id, meid, de, m2id, m2id, dse))
return True
def _getLocalConnectObject(self):
return LocalConnect(self.pot, self.mindist, **self.local_connect_params)
def _localConnect(self, min1, min2):
"""
do a local connect run between min1 and min2
Notes
-----
1) NEB to find transition state candidates.
for each transition state candidate:
2) refine the transition state candidates
3) if successful, fall off either side of the transition state
to find the minima the transition state connects. Add the new
transition state and minima to the graph
"""
# Make sure we haven't already tried this pair and
# record some data so we don't try it again in the future
if (min1, min2) in self.pairsNEB:
logger.warning("WARNING: redoing NEB for minima %s %s", min1.id(), min2.id())
logger.warning(" aborting NEB")
# self._remove_edgeGdist(min1, min2)
self.dist_graph.removeEdge(min1, min2)
return True
self.pairsNEB[(min1, min2)] = True
self.pairsNEB[(min2, min1)] = True
# Make sure they're not already connected. sanity test
if self.graph.areConnected(min1, min2):
logger.warning("in _local_connect, but minima are already connected. aborting %s %s %s", min1.id(), min2.id(),
self.getDist(min1, min2))
self.dist_graph.setTransitionStateConnection(min1, min2)
self.dist_graph.checkGraph()
return True
# do local connect run
local_connect = self._getLocalConnectObject()
res = local_connect.connect(min1, min2)
# now add each new transition state to the graph and database.
nsuccess = 0
for tsret, m1ret, m2ret in res.new_transition_states:
goodts = self._addTransitionState(tsret, m1ret, m2ret)
if goodts:
nsuccess += 1
# check results
if nsuccess == 0:
dist = self.getDist(min1, min2)
if dist < self.max_dist_merge:
logger.warning("local connect failed and the minima are close. Are the minima really the same?")
logger.warning(" energies: %s %s %s %s", min1.energy, min2.energy, "distance", dist)
if self.merge_minima:
self.mergeMinima(min1, min2)
else:
logger.warning(" set merge_minima=True to merge the minima")
return False
# remove this edge from Gdist so we don't try this pair again again
self.dist_graph.removeEdge(min1, min2)
return nsuccess > 0
def _getNextPair(self):
"""
return a pair of minima to attempt to connect
Notes
-----
this is the function which attempts to find a clever pair of minima to try to
connect with the ultimate goal of connecting minstart and minend
this method can be described as follows:
        make a new graph Gnew which is complete (all vertices connected). The edges
have a weight given by
if self.graph.areConnected(u,v):
weight(u,v) = 0.
else:
weight(u,v) = mindist(u,v)
if an NEB has been attempted between u and v then the edge is removed.
we then find the shortest path between minstart and minend. We return
the pair in this path which has edge weight with the smallest non zero value
update: find the shortest path weighted by distance squared. This penalizes finding
the NEB between minima that are very far away. (Does this too much favor long paths?)
"""
logger.info("finding a good pair to try to connect")
# get the shortest path on dist_graph between minstart and minend
if True:
logger.debug("Gdist has %s %s %s %s", self.dist_graph.Gdist.number_of_nodes(),
"nodes and", self.dist_graph.Gdist.number_of_edges(), "edges")
path, weights = self.dist_graph.shortestPath(self.minstart, self.minend)
weightsum = sum(weights)
if path is None or weightsum >= 10e9:
logger.warning("Can't find any way to try to connect the minima")
return None, None
        # get the weights of the path segments
weightlist = []
for i in range(1, len(path)):
min1 = path[i - 1]
min2 = path[i]
# w = weights.get((min1,min2))
# if w is None:
# w = weights.get((min2,min1))
w = weights[i - 1]
weightlist.append((w, min1, min2))
if True:
# print the path
logger.info("best guess for path. (dist=0.0 means the path is known)")
for w, min1, min2 in weightlist:
if w > 1e-6:
dist = self.getDist(min1, min2)
else:
dist = w
logger.info(" path guess %s %s %s", min1.id(), min2.id(), dist)
# select which minima pair to return
if self.longest_first:
weightlist.sort()
w, min1, min2 = weightlist[-1]
else:
weightlist.sort()
for w, min1, min2 in weightlist:
if w > 1e-6:
break
return min1, min2
def connect(self):
"""
the main loop of the algorithm
"""
self.NEBattempts = 2
for i in range(self.niter):
# stop if we're done
if self.graph.areConnected(self.minstart, self.minend):
logger.info("found connection!")
return
logger.info("")
logger.info("======== starting connect cycle %s %s", i, "========")
# get pair of minima to try to connect
min1, min2 = self._getNextPair()
# fail if we can't find a good pair to try
if min1 is None or min2 is None:
break
# try to connect those minima
from pele.optimize.optimization_exceptions import LineSearchError
try:
self._localConnect(min1, min2)
except LineSearchError as err:
print err
print "caught line search error, aborting connection attempt"
break
if False and i % 10 == 0:
# do some sanity checks
self.dist_graph.checkGraph()
logger.info("failed to find connection between %s %s", self.minstart.id(), self.minend.id())
def success(self):
return self.graph.areConnected(self.minstart, self.minend)
def returnPath(self):
"""return information about the path
Returns
-------
mints : list of Minimum and TransitionStates
a list of Minimum, TransitionState, Minimum objects that make up
the path
S : list of float
numpy array of the distance along the path. len(S) == len(mints)
energies : list of float
numpy array of the energies along the path
If the minima are not connected, return (None, None, None)
"""
if not self.graph.areConnected(self.minstart, self.minend):
return None, None, None
minima = nx.shortest_path(self.graph.graph, self.minstart, self.minend)
transition_states = []
mints = [minima[0]]
for i in range(1, len(minima)):
m1 = minima[i - 1]
m2 = minima[i]
ts = self.database.getTransitionState(m1, m2)
transition_states.append(ts)
mints.append(ts)
mints.append(m2)
S = np.zeros(len(mints))
for i in range(1, len(mints)):
coords1 = mints[i - 1].coords
coords2 = mints[i].coords
dist, c1, c2 = self.mindist(coords1, coords2)
S[i] = S[i - 1] + dist
energies = np.array([m.energy for m in mints])
return mints, S, energies
# ##########################################################
# only testing stuff below here
# ##########################################################
def getSetOfMinLJ(system): # pragma: no cover
db = system.create_database()
bh = system.get_basinhopping(db, outstream=None)
bh.run(100)
return system.get_potential(), db
def test(Connect=DoubleEndedConnect, natoms=16): # pragma: no cover
from pele.systems import LJCluster
# get min1
system = LJCluster(natoms)
pot, database = getSetOfMinLJ(system)
minima = database.minima()
min1 = minima[0]
min2 = minima[1]
print min1.energy, min2.energy
mindist = system.get_mindist()
connect = Connect(min1, min2, pot, mindist, database)
connect.connect()
graph = connect.graph
if False:
print graph
for node in graph.graph.nodes():
print node.id(), node.energy
for ts in graph.storage.transition_states():
print ts.minimum1.id(), ts.minimum2.id(), "E", ts.minimum1.energy, ts.minimum2.energy, ts.energy
ret = graph.getPath(min1, min2)
if ret is None:
print "no path found"
return
distances, path = ret
with open("path.out", "w") as fout:
for i in range(len(path) - 1):
m1 = path[i]
m2 = path[i + 1]
n1 = m1.id()
            n2 = m2.id()
            ts = database.getTransitionState(m1, m2)
# print "path", n1, "->", n2, m1.E, "/->", ts.E, "\->", m2.E
fout.write("%f\n" % m1.energy)
fout.write("%f\n" % ts.energy)
m2 = path[-1]
n2 = m2.id()
fout.write("%f\n" % m2.energy)
if __name__ == "__main__":
# logger.basicConfig(level=logger.DEBUG)
test(natoms=38)
| gpl-3.0 | -8,087,875,305,333,027,000 | 37.590525 | 122 | 0.577717 | false |
fgimian/flaskage | flaskage/cli.py | 1 | 12912 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import logging
import click
import flaskage
from flaskage.scaffold import Scaffold
from flaskage.utils import camelcase, AliasedGroup, MODULE_NAME
from flaskage.helpers import (
valid_project_directory, ColoredFormatter, PROJECT_NAME, MODEL_COLUMN,
COLUMN_TYPE_MAPPING, COLUMN_FACTORY_MAPPING, COLUMN_MODIFIER_MAPPING,
COLUMN_MODIFIER_PRIMARY_KEY
)
# Determine the location of our templates
TEMPLATE_DIR = os.path.abspath(
os.path.join(
os.path.dirname(flaskage.__file__), 'templates'
)
)
# Setup our ignored directories and files
IGNORED_DIRS = ['__pycache__']
IGNORED_FILES = ['*.pyc']
def configure_logging(use_color=True):
"""Adjust log output formatting."""
formatter = ColoredFormatter(
'<c>%(description)12s<r> : %(destination)s', use_color=use_color
)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger = logging.getLogger('flaskage.scaffold')
logger.setLevel(logging.INFO)
logger.addHandler(ch)
def mode_option(f):
o1 = click.option('-f', '--force', 'mode',
flag_value=Scaffold.EXISTING_OVERWRITE,
help='Force overwriting of existing files')
o2 = click.option('-p', '--prompt', 'mode', default=True,
flag_value=Scaffold.EXISTING_PROMPT,
help='Prompt to overwrite existing files (default)')
o3 = click.option('-s', '--skip', 'mode',
flag_value=Scaffold.EXISTING_SKIP,
help='Skip existing files')
return o1(o2(o3(f)))
@click.command(add_help_option=False, cls=AliasedGroup)
@click.help_option('-h', '--help')
@click.option('--color/--no-color', default=True, help='Use colors in output')
def cli(color):
"""
The Flaskage command provides the ability to generate components of a
Flaskage web application.
"""
# Setup log formatting and display
configure_logging(use_color=color)
@cli.command(add_help_option=False)
@click.help_option('-h', '--help')
@mode_option
@click.argument('project_name', type=PROJECT_NAME)
@click.pass_context
def new(ctx, project_name, mode):
"""Create a new Flaskage project."""
# Unpack the project directory and name
name, directory = project_name
# Convert the name to CamelCase for use with class names
name_camelcase = camelcase(name)
# Generation of a new project can only run outside a valid project
# directory
if valid_project_directory(os.path.dirname(directory)):
ctx.fail('You cannot create a new project inside a project directory')
click.echo()
click.echo('Generating new project %s:' % name)
click.echo()
scaffold = Scaffold(
source_root=os.path.join(TEMPLATE_DIR, 'project'),
target_root=directory,
variables={'name': name, 'name_camelcase': name_camelcase},
ignored_dirs=IGNORED_DIRS, ignored_files=IGNORED_FILES,
overwrite_target_root=True, existing_policy=mode
)
scaffold.render_structure()
click.echo()
click.echo('Getting started with your project:')
click.echo()
click.echo(' 1. Change into the new project directory')
click.echo(' cd %s' % directory)
click.echo()
click.echo(' 2. Install all client-side components using Bower')
click.echo(' bower install')
click.echo()
click.echo(' 3. Install all server-side dependencies using pip')
click.echo(' pip install -r requirements/development.txt')
click.echo()
click.echo(' 4. Start up the development web server')
click.echo(' ./manage.py server')
click.echo()
@cli.command(add_help_option=False, cls=AliasedGroup)
@click.help_option('-h', '--help')
def generate():
"""Generate code for an application component."""
pass
@generate.command(add_help_option=False)
@click.help_option('-h', '--help')
@mode_option
@click.argument('name', type=MODULE_NAME)
@click.pass_context
def asset(ctx, name, mode):
"""Generate a set of assets."""
# Convert the name to CamelCase for use with class names
name_camelcase = camelcase(name)
# Generation of items can only run in a valid project directory
if not valid_project_directory():
ctx.fail(
'You can only run the generate command from a valid project '
'directory'
)
click.echo()
click.echo('Generating new asset named %s:' % name)
click.echo()
scaffold = Scaffold(
source_root=os.path.join(TEMPLATE_DIR, 'asset'),
target_root=os.getcwd(),
variables={'name': name, 'name_camelcase': name_camelcase},
ignored_dirs=IGNORED_DIRS, ignored_files=IGNORED_FILES,
overwrite_target_root=True, existing_policy=mode
)
scaffold.render_structure()
click.echo()
@generate.command(add_help_option=False)
@click.help_option('-h', '--help')
@mode_option
@click.argument('name', type=MODULE_NAME)
@click.pass_context
def blueprint(ctx, name, mode):
"""Generate an application component (blueprint)."""
# Convert the name to CamelCase for use with class names
name_camelcase = camelcase(name)
# Generation of items can only run in a valid project directory
if not valid_project_directory():
ctx.fail(
'You can only run the generate command from a valid project '
'directory'
)
click.echo()
click.echo('Generating new blueprint named %s:' % name)
click.echo()
scaffold = Scaffold(
source_root=[
os.path.join(TEMPLATE_DIR, 'asset'),
os.path.join(TEMPLATE_DIR, 'blueprint'),
],
target_root=os.getcwd(),
variables={'name': name, 'name_camelcase': name_camelcase},
ignored_dirs=IGNORED_DIRS, ignored_files=IGNORED_FILES,
overwrite_target_root=True, existing_policy=mode
)
scaffold.render_structure()
click.echo()
click.echo('Steps required to activate the new blueprint:')
click.echo()
click.echo(' Add the blueprint import to app/__init__.py in the '
'configure_blueprints function')
click.echo()
click.echo(' from .views import %s_view' % name)
click.echo(' app.register_blueprint(%s_view.mod)' % name)
click.echo()
@generate.command(add_help_option=False)
@click.help_option('-h', '--help')
@mode_option
@click.argument('name', type=MODULE_NAME)
@click.pass_context
def helper(ctx, name, mode):
"""Generate an application-related helper."""
# Convert the name to CamelCase for use with class names
name_camelcase = camelcase(name)
# Generation of items can only run in a valid project directory
if not valid_project_directory():
ctx.fail(
'You can only run the generate command from a valid project '
'directory'
)
click.echo()
click.echo('Generating new helper named %s:' % name)
click.echo()
scaffold = Scaffold(
source_root=os.path.join(TEMPLATE_DIR, 'helper'),
target_root=os.getcwd(),
variables={'name': name, 'name_camelcase': name_camelcase},
ignored_dirs=IGNORED_DIRS, ignored_files=IGNORED_FILES,
overwrite_target_root=True, existing_policy=mode
)
scaffold.render_structure()
click.echo()
@generate.command(
add_help_option=False, short_help='Generate a database model'
)
@click.help_option('-h', '--help')
@mode_option
@click.argument('name', type=MODULE_NAME)
@click.argument('columns', nargs=-1, type=MODEL_COLUMN)
@click.pass_context
def model(ctx, name, columns, mode):
"""
Generate a database model using a given name. You may also specify the
columns you need following the model name using the format:
<name>[:<type>[,<length>][:<modifier>,<modifier>...]]
e.g.
flaskage g model user email:string:primary name:string,80:index:required
The following types are listed below along with their corresponding
SQLAlchemy mapping:
\b
Numeric Types:
- integer (or int): Integer
- decimal: Numeric
- float: Float
\b
Text Types:
- string (or str): String
- text: Text
\b
Date & Time Types:
- date: Date
- time: Time
- datetime: DateTime
\b
Other Types:
- binary (or bin): LargeBinary
- boolean (or bool): Boolean
The string, text and binary types also accept an optional length.
\b
The column modifiers available are:
- index
- primary
- required
- unique
If no primary key is specified, a primary key integer column named id
will be created for you.
"""
# Convert the name to CamelCase for use with class names
name_camelcase = camelcase(name)
# Generation of items can only run in a valid project directory
if not valid_project_directory():
ctx.fail(
'You can only run the generate command from a valid project '
'directory'
)
# Generate the Python code required for each column (this is too
# tedious to do in templates)
primary_key_provided = False
column_model_definitions = []
column_factory_definitions = []
for column_name, type, length, modifiers in columns:
# Generate the type and its size (if applicable)
model_definition = 'db.%s' % COLUMN_TYPE_MAPPING[type]
if length:
model_definition += '(%i)' % length
# Generate modifiers (primary key, index .etc)
for modifier in modifiers:
model_definition += ', %s' % COLUMN_MODIFIER_MAPPING[modifier]
if modifier == COLUMN_MODIFIER_PRIMARY_KEY:
primary_key_provided = True
# Add the model column definition to our list
column_model_definitions.append((column_name, model_definition))
# Generate the model factory fakers
factory_definition = None
if type in COLUMN_FACTORY_MAPPING:
if column_name in COLUMN_FACTORY_MAPPING[type]:
factory_definition = COLUMN_FACTORY_MAPPING[type][column_name]
elif '*' in COLUMN_FACTORY_MAPPING[type]:
factory_definition = COLUMN_FACTORY_MAPPING[type]['*']
# Add the factory column definition to our list
if factory_definition:
column_factory_definitions.append(
(column_name, factory_definition)
)
click.echo()
click.echo('Generating new model named %s:' % name)
click.echo()
scaffold = Scaffold(
source_root=os.path.join(TEMPLATE_DIR, 'model'),
target_root=os.getcwd(),
variables={
'name': name, 'name_camelcase': name_camelcase,
'column_model_definitions': column_model_definitions,
'primary_key_provided': primary_key_provided,
'column_factory_definitions': column_factory_definitions
},
ignored_dirs=IGNORED_DIRS, ignored_files=IGNORED_FILES,
overwrite_target_root=True, existing_policy=mode
)
scaffold.render_structure()
click.echo()
click.echo('Steps required to activate the new model:')
click.echo()
click.echo(' 1. Add the model import to app/models/__init__.py')
click.echo(' from .%s import %s # noqa' % (name, name_camelcase))
click.echo()
click.echo(' 2. Add the factory import to test/factories/__init__.py')
click.echo(' from .%s_factory import %sFactory # noqa' %
(name, name_camelcase))
click.echo()
click.echo(' 3. Generate a migration to add the new model to your '
'database')
click.echo(' ./manage.py db migrate')
click.echo()
click.echo(' 4. Apply the migration')
click.echo(' ./manage.py db upgrade')
click.echo()
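# Illustrative sketch, not part of the original module: roughly how one column
# spec from the docstring above flows through the parsing loop. The modifier
# expansions ("index=True", "nullable=False") and the final db.Column line are
# assumptions about COLUMN_MODIFIER_MAPPING and the model template, shown for
# illustration only.
#
#   command line spec:            name:string,80:index:required
#   parsed MODEL_COLUMN tuple:    ('name', 'string', 80, ['index', 'required'])
#   model_definition built above: 'db.String(80), index=True, nullable=False'
#   rendered by the template as:  name = db.Column(db.String(80), index=True,
#                                                  nullable=False)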
@generate.command(add_help_option=False)
@click.help_option('-h', '--help')
@mode_option
@click.argument('name', type=MODULE_NAME)
@click.pass_context
def library(ctx, name, mode):
"""Generate an application-agnostic library."""
# Convert the name to CamelCase for use with class names
name_camelcase = camelcase(name)
# Generation of items can only run in a valid project directory
if not valid_project_directory():
ctx.fail(
'You can only run the generate command from a valid project '
'directory'
)
click.echo()
click.echo('Generating new library named %s:' % name)
click.echo()
scaffold = Scaffold(
source_root=os.path.join(TEMPLATE_DIR, 'lib'),
target_root=os.getcwd(),
variables={'name': name, 'name_camelcase': name_camelcase},
ignored_dirs=IGNORED_DIRS, ignored_files=IGNORED_FILES,
overwrite_target_root=True, existing_policy=mode
)
scaffold.render_structure()
click.echo()
if __name__ == '__main__':
cli()
| mit | -9,081,164,089,704,797,000 | 31.688608 | 78 | 0.644129 | false |
sametmax/Django--an-app-at-a-time | ignore_this_directory/django/utils/datastructures.py | 4 | 10194 | import copy
from collections import OrderedDict
from collections.abc import Mapping
class OrderedSet:
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict.fromkeys(iterable or ())
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict)
def __contains__(self, item):
return item in self.dict
def __bool__(self):
return bool(self.dict)
def __len__(self):
return len(self.dict)
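# Illustrative sketch, not part of the original module: iteration follows
# insertion order, and discard() ignores missing items while remove() raises.
#
#   >>> s = OrderedSet(['b', 'a'])
#   >>> s.add('c')
#   >>> list(s)
#   ['b', 'a', 'c']
#   >>> s.discard('missing')   # no error
#   >>> 'a' in s
#   True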
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super().__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, super().__repr__())
def __getitem__(self, key):
"""
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
"""
try:
list_ = super().__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(key)
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super().__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo):
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
return {**self.__dict__, '_data': {k: self._getlist(k) for k in self}}
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def _getlist(self, key, default=None, force_list=False):
"""
Return a list of values for the key.
Used internally to manipulate values list. If force_list is True,
return a new copy of values.
"""
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values
def getlist(self, key, default=None):
"""
Return the list of values for the key. If key doesn't exist, return a
default value.
"""
return self._getlist(key, default, force_list=True)
def setlist(self, key, list_):
super().__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
# Do not return default here because __setitem__() may store
# another value -- QueryDict.__setitem__() does. Look it up.
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
# Do not return default_list here because setlist() may store
# another value -- QueryDict.setlist() does. Look it up.
return self._getlist(key)
def appendlist(self, key, value):
"""Append an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def lists(self):
"""Yield (key, list) pairs."""
return iter(super().items())
def values(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key]
def copy(self):
"""Return a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""Extend rather than replace existing key lists."""
if len(args) > 1:
raise TypeError("update expected at most 1 argument, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.items():
self.setlistdefault(key).append(value)
def dict(self):
"""Return current object as a dict with singular values."""
return {key: self[key] for key in self}
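# Illustrative sketch, not part of the original module: update() extends
# existing key lists instead of replacing them (unlike dict.update()), and
# appendlist() adds a single value to the list for a key.
#
#   >>> d = MultiValueDict({'name': ['Adrian']})
#   >>> d.update({'name': 'Simon'})
#   >>> d.getlist('name')
#   ['Adrian', 'Simon']
#   >>> d.appendlist('name', 'Jacob')
#   >>> d.getlist('name')
#   ['Adrian', 'Simon', 'Jacob']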
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs):
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wrap accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super().__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix):]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value
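# Illustrative sketch, not part of the original module: only keys carrying the
# prefix are passed through func, and the prefix is stripped before the lookup.
# The quoting function below is an arbitrary example.
#
#   >>> d = DictWrapper({'a': 'value'}, lambda v: '"%s"' % v, 'quoted_')
#   >>> d['a']
#   'value'
#   >>> d['quoted_a']
#   '"value"'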
def _destruct_iterable_mapping_values(data):
for i, elem in enumerate(data):
if len(elem) != 2:
raise ValueError(
'dictionary update sequence element #{} has '
'length {}; 2 is required.'.format(i, len(elem))
)
if not isinstance(elem[0], str):
raise ValueError('Element key %r invalid, only strings are allowed' % elem[0])
yield tuple(elem)
class CaseInsensitiveMapping(Mapping):
"""
Mapping allowing case-insensitive key lookups. Original case of keys is
preserved for iteration and string representation.
Example::
>>> ci_map = CaseInsensitiveMapping({'name': 'Jane'})
>>> ci_map['Name']
Jane
>>> ci_map['NAME']
Jane
>>> ci_map['name']
Jane
>>> ci_map # original case preserved
{'name': 'Jane'}
"""
def __init__(self, data):
if not isinstance(data, Mapping):
data = {k: v for k, v in _destruct_iterable_mapping_values(data)}
self._store = {k.lower(): (k, v) for k, v in data.items()}
def __getitem__(self, key):
return self._store[key.lower()][1]
def __len__(self):
return len(self._store)
def __eq__(self, other):
return isinstance(other, Mapping) and {
k.lower(): v for k, v in self.items()
} == {
k.lower(): v for k, v in other.items()
}
def __iter__(self):
return (original_key for original_key, value in self._store.values())
def __repr__(self):
return repr({key: value for key, value in self._store.values()})
def copy(self):
return self
| mit | 5,136,058,506,778,628,000 | 28.894428 | 107 | 0.558171 | false |
nicko96/Chrome-Infra | appengine/chromium_rietveld/tests/test_dependency_utils.py | 1 | 7176 | #!/usr/bin/env python
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for codereview/dependency_utils.py."""
import unittest
import setup
setup.process_args()
from codereview import models
from codereview import dependency_utils
class MockKey(object):
def __init__(self, key_id):
self.key_id = key_id
def id(self):
return self.key_id
class MockIssue(object):
def __init__(self, key_id):
self.key_id = key_id
@property
def key(self):
return MockKey(self.key_id)
class MockPatchSet(object):
def __init__(self, key_id, issue_key_id, dependent_patchsets,
depends_on_patchset):
self.key_id = key_id
self.issue_key_id = issue_key_id
self.dependent_patchsets = dependent_patchsets
self.depends_on_patchset = depends_on_patchset
self.put_called = False
def put(self):
self.put_called = True
@property
def key(self):
return MockKey(self.key_id)
@property
def issue_key(self):
return MockKey(self.issue_key_id)
class TestPatchSetDependencyUtils(unittest.TestCase):
"""Test the dependency_utils module."""
def setUp(self):
# Allow models.Issue.get_by_id to be monkeypatched by the tests.
self.original_issue_get_by_id = models.Issue.get_by_id
# Allow models.PatchSet.get_by_id to be monkeypatched by the tests.
self.original_patchset_get_by_id = models.PatchSet.get_by_id
def tearDown(self):
# Undo any monkeypatching done by the tests.
models.Issue.get_by_id = self.original_issue_get_by_id
models.PatchSet.get_by_id = self.original_patchset_get_by_id
def test_remove_as_dependent(self):
# Create the patchset we will be removing as a dependent.
patchset = MockPatchSet('40', '4', [], '3:30')
# Make get_by_id methods return what we expect.
def mock_issue_get_by_id():
def _w(*args, **_kwargs):
return MockIssue(args[1])
return classmethod(_w)
models.Issue.get_by_id = mock_issue_get_by_id()
mockpatchset = MockPatchSet('30', '3', ['4:40', '1:10'], '')
def mock_patchset_get_by_id():
def _w(*_args, **_kwargs):
return mockpatchset
return classmethod(_w)
models.PatchSet.get_by_id = mock_patchset_get_by_id()
# Assert that dependent_patchsets of the MockpatchSet is as expected and
# that put was called on it.
dependency_utils.remove_as_dependent(patchset)
self.assertEquals(['1:10'], mockpatchset.dependent_patchsets)
self.assertTrue(mockpatchset.put_called)
def test_remove_dependencies(self):
# Create the patchset we will be removing dependencies of.
dependent_patchsets = ['1:10', '2:20', '3:30']
patchset = MockPatchSet('40', '4', dependent_patchsets, '')
# Make get_by_id methods return what we expect.
def mock_issue_get_by_id():
def _w(*args, **_kwargs):
return MockIssue(args[1])
return classmethod(_w)
models.Issue.get_by_id = mock_issue_get_by_id()
mockpatchsets = []
def mock_patchset_get_by_id():
def _w(*args, **kwargs):
mockpatchset = MockPatchSet(args[1], kwargs['parent'].id(), [], '4:40')
mockpatchsets.append(mockpatchset)
return mockpatchset
return classmethod(_w)
models.PatchSet.get_by_id = mock_patchset_get_by_id()
# Assert that depends_on_patchset of the MockpatchSets are empty and that
# put was called on them.
dependency_utils.remove_dependencies(patchset)
for mockpatchset in mockpatchsets:
self.assertEquals('', mockpatchset.depends_on_patchset)
self.assertTrue(mockpatchset.put_called)
# Now change the depends_on_str for the dependents. Their dependency should
# not be changed and put should not be called on them.
mockpatchsets = []
def mock_patchset_get_by_id():
def _w(*args, **kwargs):
mockpatchset = MockPatchSet(args[1], kwargs['parent'].id(), [], '4:41')
mockpatchsets.append(mockpatchset)
return mockpatchset
return classmethod(_w)
models.PatchSet.get_by_id = mock_patchset_get_by_id()
dependency_utils.remove_dependencies(patchset)
for mockpatchset in mockpatchsets:
self.assertEquals('4:41', mockpatchset.depends_on_patchset)
self.assertFalse(mockpatchset.put_called)
def test_mark_as_dependent_and_get_dependency_str(self):
# Make get_by_id methods return what we expect.
def mock_issue_get_by_id():
def _w(*args, **_kwargs):
return MockIssue(args[1])
return classmethod(_w)
models.Issue.get_by_id = mock_issue_get_by_id()
mockpatchset = MockPatchSet('40', '4', ['1:10', '2:20'], '')
def mock_patchset_get_by_id():
def _w(*_args, **_kwargs):
return mockpatchset
return classmethod(_w)
models.PatchSet.get_by_id = mock_patchset_get_by_id()
dependency_str = (
dependency_utils.mark_as_dependent_and_get_dependency_str(
'4:40', '3', '30'))
# Since the depends on Issue and PatchSet were found the dependency str
# should be returned.
self.assertEquals('4:40', dependency_str)
# Assert that the dependent_patchsets was updated and that put was called.
self.assertEquals(['1:10', '2:20', '3:30'],
mockpatchset.dependent_patchsets)
self.assertTrue(mockpatchset.put_called)
# Make the referenced Issue be invalid and assert that a dependency str is
# not returned and dependent_patchsets is not updated and that put is not
# called.
def mock_issue_get_by_id():
def _w(*_args, **_kwargs):
return None
return classmethod(_w)
models.Issue.get_by_id = mock_issue_get_by_id()
mockpatchset = MockPatchSet('40', '4', ['1:10', '2:20'], '')
dependency_str = (
dependency_utils.mark_as_dependent_and_get_dependency_str(
'4:40', '3', '30'))
self.assertEquals(None, dependency_str)
self.assertEquals(['1:10', '2:20'], mockpatchset.dependent_patchsets)
self.assertFalse(mockpatchset.put_called)
# Make the referenced Patchset be invalid and assert that a dependency str
# is not returned.
def mock_issue_get_by_id():
def _w(*args, **_kwargs):
return MockIssue(args[1])
return classmethod(_w)
models.Issue.get_by_id = mock_issue_get_by_id()
def mock_patchset_get_by_id():
def _w(*_args, **_kwargs):
return None
return classmethod(_w)
models.PatchSet.get_by_id = mock_patchset_get_by_id()
dependency_str = (
dependency_utils.mark_as_dependent_and_get_dependency_str(
'4:40', '3', '30'))
self.assertEquals(None, dependency_str)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 6,662,359,119,886,542,000 | 33.666667 | 79 | 0.665412 | false |
seriyps/ru_number_to_text | tests.py | 1 | 6388 | # -*- coding: utf-8 -*-
'''
Created on 13.03.2016 by Artem Tiumentcev
@author: Sergey Prokhorov <[email protected]>
'''
import unittest
from num2t4ru import num2text, decimal2text
class TestStrToText(unittest.TestCase):
def test_units(self):
self.assertEqual(num2text(0), u'ноль')
self.assertEqual(num2text(1), u'один')
self.assertEqual(num2text(9), u'девять')
def test_gender(self):
self.assertEqual(num2text(1000), u'одна тысяча')
self.assertEqual(num2text(2000), u'две тысячи')
self.assertEqual(num2text(1000000), u'один миллион')
self.assertEqual(num2text(2000000), u'два миллиона')
def test_teens(self):
self.assertEqual(num2text(10), u'десять')
self.assertEqual(num2text(11), u'одиннадцать')
self.assertEqual(num2text(19), u'девятнадцать')
def test_tens(self):
self.assertEqual(num2text(20), u'двадцать')
self.assertEqual(num2text(90), u'девяносто')
def test_hundreeds(self):
self.assertEqual(num2text(100), u'сто')
self.assertEqual(num2text(900), u'девятьсот')
def test_orders(self):
self.assertEqual(num2text(1000), u'одна тысяча')
self.assertEqual(num2text(2000), u'две тысячи')
self.assertEqual(num2text(5000), u'пять тысяч')
self.assertEqual(num2text(1000000), u'один миллион')
self.assertEqual(num2text(2000000), u'два миллиона')
self.assertEqual(num2text(5000000), u'пять миллионов')
self.assertEqual(num2text(1000000000), u'один миллиард')
self.assertEqual(num2text(2000000000), u'два миллиарда')
self.assertEqual(num2text(5000000000), u'пять миллиардов')
def test_inter_oreders(self):
self.assertEqual(num2text(1100), u'одна тысяча сто')
self.assertEqual(num2text(2001), u'две тысячи один')
self.assertEqual(num2text(5011), u'пять тысяч одиннадцать')
self.assertEqual(num2text(1002000), u'один миллион две тысячи')
self.assertEqual(num2text(2020000), u'два миллиона двадцать тысяч')
self.assertEqual(num2text(5300600), u'пять миллионов триста тысяч шестьсот')
self.assertEqual(num2text(1002000000), u'один миллиард два миллиона')
self.assertEqual(num2text(2030000000), u'два миллиарда тридцать миллионов')
self.assertEqual(num2text(1234567891),
u'один миллиард двести тридцать четыре миллиона '
u'пятьсот шестьдесят семь тысяч '
u'восемьсот девяносто один')
def test_main_units(self):
male_units = ((u'рубль', u'рубля', u'рублей'), 'm')
female_units = ((u'копейка', u'копейки', u'копеек'), 'f')
self.assertEqual(num2text(101, male_units), u'сто один рубль')
self.assertEqual(num2text(102, male_units), u'сто два рубля')
self.assertEqual(num2text(105, male_units), u'сто пять рублей')
self.assertEqual(num2text(101, female_units), u'сто одна копейка')
self.assertEqual(num2text(102, female_units), u'сто две копейки')
self.assertEqual(num2text(105, female_units), u'сто пять копеек')
self.assertEqual(num2text(0, male_units), u'ноль рублей')
self.assertEqual(num2text(0, female_units), u'ноль копеек')
self.assertEqual(num2text(3000, male_units), u'три тысячи рублей')
def test_decimal2text(self):
int_units = ((u'рубль', u'рубля', u'рублей'), 'm')
exp_units = ((u'копейка', u'копейки', u'копеек'), 'f')
self.assertEqual(
decimal2text(
'105.245',
int_units=int_units,
exp_units=exp_units),
u'сто пять рублей двадцать четыре копейки')
self.assertEqual(
decimal2text(
'101.26',
int_units=int_units,
exp_units=exp_units),
u'сто один рубль двадцать шесть копеек')
self.assertEqual(
decimal2text(
'102.2450',
places=4,
int_units=int_units,
exp_units=exp_units),
u'сто два рубля две тысячи четыреста пятьдесят копеек') # xD
self.assertEqual(
decimal2text(
'111',
int_units=int_units,
exp_units=exp_units),
u'сто одиннадцать рублей ноль копеек')
self.assertEqual(
decimal2text(
'3000.00',
int_units=int_units,
exp_units=exp_units),
u'три тысячи рублей ноль копеек')
def test_negative(self):
self.assertEqual(num2text(-12345),
u"минус двенадцать тысяч триста сорок пять")
self.assertEqual(
decimal2text('-123.45'),
u'минус сто двадцать три сорок пять')
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
try:
num = sys.argv[1]
if '.' in num:
print(decimal2text(
num,
int_units=((u'штука', u'штуки', u'штук'), 'f'),
exp_units=((u'кусок', u'куска', u'кусков'), 'm')))
else:
print(num2text(
int(num),
main_units=((u'штука', u'штуки', u'штук'), 'f')))
except ValueError:
            sys.stderr.write("Invalid argument {}\n".format(sys.argv[1]))
sys.exit()
unittest.main()
| apache-2.0 | 1,866,135,707,669,408,300 | 38.550725 | 84 | 0.587211 | false |
wheelcms/wheelcms_axle | wheelcms_axle/tests/test_content_crud.py | 1 | 7287 | from wheelcms_axle.models import Node
from wheelcms_axle.tests.models import Type1, Type1Type, Type2Type
from wheelcms_axle.forms import formfactory
from .fixtures import multilang_ENNL, root
from .utils import MockedQueryDict
import pytest
@pytest.mark.usefixtures("multilang_ENNL")
@pytest.mark.usefixtures("localtyperegistry")
class TestContentCreate(object):
types = (Type1Type, Type2Type)
def test_success(self, client, root):
""" simple case where create succeeds """
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(title="hello",
slug="world",
language="en"))
assert form.is_valid()
assert form.cleaned_data['slug'] == "world"
tp1 = form.save()
assert tp1.title == "hello"
def test_title_missing(self, client, root):
""" title is missing """
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(slug="world",
language="en"))
assert not form.is_valid()
assert 'title' in form.errors
def test_slug_invalid(self, client, root):
""" invalid characters in slug """
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(title="hello",
slug="world$", language="en"))
assert not form.is_valid()
assert 'slug' in form.errors
def test_slug_used(self, client, root):
""" slug already exists in parent """
root.add('world')
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(title="hello",
slug="world", language="en"))
assert not form.is_valid()
assert 'slug' in form.errors
def test_tags(self, client, root):
""" test tag suport on content """
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(title="hello",
slug="world",
tags="hello, world",
language="en"))
assert form.is_valid()
assert form.cleaned_data['slug'] == "world"
tp1 = form.save()
assert tp1.title == "hello"
assert "hello" in tp1.tags.values_list("name", flat=True)
assert "world" in tp1.tags.values_list("name", flat=True)
def test_available_languages(self, client, root):
form = formfactory(Type1)(parent=root,node=root)
assert set((x[0] for x in form.fields['language'].choices)) == \
set(('en', 'nl', 'any'))
def test_allowed_subcontent_empty(self, client, root):
"""
If no subcontent is explicitly selected, allowed should
be saved as NULL which will be interpreted as "use class defaults"
"""
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(title="hello",
slug="world",
tags="hello, world",
language="en",
allowed=[]))
assert form.is_valid()
tp1 = form.save()
assert tp1.allowed is None
def test_allowed_subcontent_selection(self, client, root):
"""
If an explicit selection is made, this selection should
        be saved as a comma-separated string
"""
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(
title="hello",
slug="world",
tags="hello, world",
language="en",
allowed=["tests.type1", "tests.type2"]))
# import pytest;pytest.set_trace()
assert form.is_valid()
tp1 = form.save()
assert tp1.allowed == "tests.type1,tests.type2"
def test_allowed_subcontent_nosubcontent(self, client, root):
"""
        If the "no_subcontent" checkbox is checked, no subcontent
        is allowed, which is saved as an empty string (not NULL!),
        regardless of any "allowed" selection.
"""
form = formfactory(Type1)(parent=root,
data=MockedQueryDict(
title="hello",
slug="world",
tags="hello, world",
language="en",
allowed=["tests.type1", "tests.type2"],
no_subcontent=True))
assert form.is_valid()
tp1 = form.save()
assert tp1.allowed == ""
def test_allowed_subcontent_selection_existing(self, client, root):
"""
Verify the selection is correctly initialized from a
        comma-separated string
"""
t = Type1(node=root, title="test", language="en",
allowed="tests.type1,tests.type2").save()
form = formfactory(Type1)(parent=root, instance=t)
assert set(form['allowed'].value()) == \
set(('tests.type1', 'tests.type2'))
@pytest.mark.usefixtures("localtyperegistry")
@pytest.mark.usefixtures("multilang_ENNL")
class TestContentUpdate(object):
type = Type1Type
def test_available_languages(self, client, root):
t = self.type.create(node=root, title="EN trans", language="en").save()
form = formfactory(Type1)(parent=root,node=root)
assert 'en' not in set((x[0] for x in form.fields['language'].choices))
assert 'nl' in set((x[0] for x in form.fields['language'].choices))
def test_available_languages_any(self, client, root):
self.type.create(node=root, title="EN trans", language="en").save()
self.type.create(node=root, title="ANY trans", language="any").save()
form = formfactory(Type1)(parent=root,node=root)
assert 'en' not in set((x[0] for x in form.fields['language'].choices))
assert 'any' not in set((x[0] for x in form.fields['language'].choices))
assert 'nl' in set((x[0] for x in form.fields['language'].choices))
def test_available_languages_current(self, client, root):
""" language can, of course, be selected if it's the content being
            edited """
en = self.type.create(node=root, title="EN trans", language="en").save()
any = self.type.create(node=root, title="ANY trans", language="any").save()
form = formfactory(Type1)(parent=root,node=root, instance=en.instance)
assert 'en' in set((x[0] for x in form.fields['language'].choices))
assert 'any' not in set((x[0] for x in form.fields['language'].choices))
assert 'nl' in set((x[0] for x in form.fields['language'].choices))
| bsd-2-clause | 7,097,146,730,783,951,000 | 42.634731 | 83 | 0.519693 | false |
chitr/neutron | neutron/tests/functional/agent/l3/test_dvr_router.py | 1 | 29844 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import mock
import netaddr
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as l3_constants
from neutron.tests.common import l3_test_common
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.l3 import framework
DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
class TestDvrRouter(framework.L3AgentTestFramework):
def test_dvr_router_lifecycle_without_ha_without_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=False, enable_snat=False)
def test_dvr_router_lifecycle_without_ha_with_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=False, enable_snat=True)
def test_dvr_router_lifecycle_ha_with_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=True, enable_snat=True)
def _helper_create_dvr_router_fips_for_ext_network(
self, agent_mode, **dvr_router_kwargs):
self.agent.conf.agent_mode = agent_mode
router_info = self.generate_dvr_router_info(**dvr_router_kwargs)
self.mock_plugin_api.get_external_network_id.return_value = (
router_info['_floatingips'][0]['floating_network_id'])
router = self.manage_router(self.agent, router_info)
fip_ns = router.fip_ns.get_name()
return router, fip_ns
def _validate_fips_for_external_network(self, router, fip_ns):
self.assertTrue(self._namespace_exists(router.ns_name))
self.assertTrue(self._namespace_exists(fip_ns))
self._assert_dvr_floating_ips(router)
self._assert_snat_namespace_does_not_exist(router)
def test_dvr_router_fips_for_multiple_ext_networks(self):
agent_mode = 'dvr'
# Create the first router fip with external net1
dvr_router1_kwargs = {'ip_address': '19.4.4.3',
'subnet_cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1',
'gateway_mac': 'ca:fe:de:ab:cd:ef'}
router1, fip1_ns = (
self._helper_create_dvr_router_fips_for_ext_network(
agent_mode, **dvr_router1_kwargs))
# Validate the fip with external net1
self._validate_fips_for_external_network(router1, fip1_ns)
# Create the second router fip with external net2
dvr_router2_kwargs = {'ip_address': '19.4.5.3',
'subnet_cidr': '19.4.5.0/24',
'gateway_ip': '19.4.5.1',
'gateway_mac': 'ca:fe:de:ab:cd:fe'}
router2, fip2_ns = (
self._helper_create_dvr_router_fips_for_ext_network(
agent_mode, **dvr_router2_kwargs))
# Validate the fip with external net2
self._validate_fips_for_external_network(router2, fip2_ns)
def _dvr_router_lifecycle(self, enable_ha=False, enable_snat=False,
custom_mtu=2000,
ip_version=4,
dual_stack=False):
'''Test dvr router lifecycle
:param enable_ha: sets the ha value for the router.
:param enable_snat: the value of enable_snat is used
to set the agent_mode.
'''
# The value of agent_mode can be dvr, dvr_snat, or legacy.
# Since by definition this is a dvr (distributed = true)
# only dvr and dvr_snat are applicable
self.agent.conf.agent_mode = 'dvr_snat' if enable_snat else 'dvr'
self.agent.conf.network_device_mtu = custom_mtu
# We get the router info particular to a dvr router
router_info = self.generate_dvr_router_info(
enable_ha, enable_snat)
# We need to mock the get_agent_gateway_port return value
# because the whole L3PluginApi is mocked and we need the port
# gateway_port information before the l3_agent will create it.
# The port returned needs to have the same information as
# router_info['gw_port']
self.mock_plugin_api.get_agent_gateway_port.return_value = router_info[
'gw_port']
# We also need to mock the get_external_network_id method to
# get the correct fip namespace.
self.mock_plugin_api.get_external_network_id.return_value = (
router_info['_floatingips'][0]['floating_network_id'])
# With all that set we can now ask the l3_agent to
# manage the router (create it, create namespaces,
# attach interfaces, etc...)
router = self.manage_router(self.agent, router_info)
if enable_ha:
port = router.get_ex_gw_port()
interface_name = router.get_external_device_name(port['id'])
self._assert_no_ip_addresses_on_interface(router.ha_namespace,
interface_name)
utils.wait_until_true(lambda: router.ha_state == 'master')
# Keepalived notifies of a state transition when it starts,
# not when it ends. Thus, we have to wait until keepalived finishes
# configuring everything. We verify this by waiting until the last
# device has an IP address.
device = router.router[l3_constants.INTERFACE_KEY][-1]
device_exists = functools.partial(
self.device_exists_with_ips_and_mac,
device,
router.get_internal_device_name,
router.ns_name)
utils.wait_until_true(device_exists)
ext_gateway_port = router_info['gw_port']
self.assertTrue(self._namespace_exists(router.ns_name))
utils.wait_until_true(
lambda: self._metadata_proxy_exists(self.agent.conf, router))
self._assert_internal_devices(router)
self._assert_dvr_external_device(router)
self._assert_dvr_gateway(router)
self._assert_dvr_floating_ips(router)
self._assert_snat_chains(router)
self._assert_floating_ip_chains(router)
self._assert_metadata_chains(router)
self._assert_extra_routes(router)
self._assert_rfp_fpr_mtu(router, custom_mtu)
if enable_snat:
ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4]
snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self._assert_onlink_subnet_routes(
router, ip_versions, snat_ns_name)
self._delete_router(self.agent, router.router_id)
self._assert_fip_namespace_deleted(ext_gateway_port)
self._assert_router_does_not_exist(router)
self._assert_snat_namespace_does_not_exist(router)
def generate_dvr_router_info(self,
enable_ha=False,
enable_snat=False,
agent=None,
**kwargs):
if not agent:
agent = self.agent
router = l3_test_common.prepare_router_data(
enable_snat=enable_snat,
enable_floating_ip=True,
enable_ha=enable_ha,
**kwargs)
internal_ports = router.get(l3_constants.INTERFACE_KEY, [])
router['distributed'] = True
router['gw_port_host'] = agent.conf.host
router['gw_port']['binding:host_id'] = agent.conf.host
floating_ip = router['_floatingips'][0]
floating_ip['floating_network_id'] = router['gw_port']['network_id']
floating_ip['host'] = agent.conf.host
floating_ip['port_id'] = internal_ports[0]['id']
floating_ip['status'] = 'ACTIVE'
self._add_snat_port_info_to_router(router, internal_ports)
# FIP has a dependency on external gateway. So we need to create
# the snat_port info and fip_agent_gw_port_info irrespective of
# the agent type the dvr supports. The namespace creation is
# dependent on the agent_type.
external_gw_port = router['gw_port']
self._add_fip_agent_gw_port_info_to_router(router, external_gw_port)
return router
def _add_fip_agent_gw_port_info_to_router(self, router, external_gw_port):
# Add fip agent gateway port information to the router_info
fip_gw_port_list = router.get(
l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
if not fip_gw_port_list and external_gw_port:
# Get values from external gateway port
fixed_ip = external_gw_port['fixed_ips'][0]
float_subnet = external_gw_port['subnets'][0]
port_ip = fixed_ip['ip_address']
# Pick an ip address which is not the same as port_ip
fip_gw_port_ip = str(netaddr.IPAddress(port_ip) + 5)
# Add floatingip agent gateway port info to router
prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [
{'subnets': [
{'cidr': float_subnet['cidr'],
'gateway_ip': float_subnet['gateway_ip'],
'id': fixed_ip['subnet_id']}],
'network_id': external_gw_port['network_id'],
'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW,
'mac_address': 'fa:16:3e:80:8d:89',
'binding:host_id': self.agent.conf.host,
'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
'ip_address': fip_gw_port_ip,
'prefixlen': prefixlen}],
'id': framework._uuid(),
'device_id': framework._uuid()}
]
def _add_snat_port_info_to_router(self, router, internal_ports):
# Add snat port information to the router
snat_port_list = router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
if not snat_port_list and internal_ports:
# Get values from internal port
port = internal_ports[0]
fixed_ip = port['fixed_ips'][0]
snat_subnet = port['subnets'][0]
port_ip = fixed_ip['ip_address']
# Pick an ip address which is not the same as port_ip
snat_ip = str(netaddr.IPAddress(port_ip) + 5)
# Add the info to router as the first snat port
# in the list of snat ports
prefixlen = netaddr.IPNetwork(snat_subnet['cidr']).prefixlen
router[l3_constants.SNAT_ROUTER_INTF_KEY] = [
{'subnets': [
{'cidr': snat_subnet['cidr'],
'gateway_ip': snat_subnet['gateway_ip'],
'id': fixed_ip['subnet_id']}],
'network_id': port['network_id'],
'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:89',
'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
'ip_address': snat_ip,
'prefixlen': prefixlen}],
'id': framework._uuid(),
'device_id': framework._uuid()}
]
def _assert_dvr_external_device(self, router):
external_port = router.get_ex_gw_port()
snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
# if the agent is in dvr_snat mode, then we have to check
# that the correct ports and ip addresses exist in the
# snat_ns_name namespace
if self.agent.conf.agent_mode == 'dvr_snat':
device_exists = functools.partial(
self.device_exists_with_ips_and_mac,
external_port,
router.get_external_device_name,
snat_ns_name)
utils.wait_until_true(device_exists)
# if the agent is in dvr mode then the snat_ns_name namespace
# should not be present at all:
elif self.agent.conf.agent_mode == 'dvr':
self.assertFalse(
self._namespace_exists(snat_ns_name),
"namespace %s was found but agent is in dvr mode not dvr_snat"
% (str(snat_ns_name))
)
# if the agent is anything else the test is misconfigured
# we force a test failure with message
else:
self.assertTrue(False, " agent not configured for dvr or dvr_snat")
def _assert_dvr_gateway(self, router):
gateway_expected_in_snat_namespace = (
self.agent.conf.agent_mode == 'dvr_snat'
)
if gateway_expected_in_snat_namespace:
self._assert_dvr_snat_gateway(router)
self._assert_removal_of_already_deleted_gateway_device(router)
snat_namespace_should_not_exist = (
self.agent.conf.agent_mode == 'dvr'
)
if snat_namespace_should_not_exist:
self._assert_snat_namespace_does_not_exist(router)
def _assert_dvr_snat_gateway(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=namespace)
existing_gateway = (
external_device.route.get_gateway().get('gateway'))
expected_gateway = external_port['subnets'][0]['gateway_ip']
self.assertEqual(expected_gateway, existing_gateway)
def _assert_removal_of_already_deleted_gateway_device(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
device = ip_lib.IPDevice("fakedevice",
namespace=namespace)
# Assert that no exception is thrown for this case
self.assertIsNone(router._delete_gateway_device_if_exists(
device, "192.168.0.1", 0))
def _assert_snat_namespace_does_not_exist(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self.assertFalse(self._namespace_exists(namespace))
def _assert_dvr_floating_ips(self, router):
# in the fip namespace:
# Check that the fg-<port-id> (floatingip_agent_gateway)
# is created with the ip address of the external gateway port
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
self.assertTrue(floating_ips)
# We need to fetch the floatingip agent gateway port info
# from the router_info
floating_agent_gw_port = (
router.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY])
self.assertTrue(floating_agent_gw_port)
external_gw_port = floating_agent_gw_port[0]
fip_ns = self.agent.get_fip_ns(floating_ips[0]['floating_network_id'])
fip_ns_name = fip_ns.get_name()
fg_port_created_successfully = ip_lib.device_exists_with_ips_and_mac(
fip_ns.get_ext_device_name(external_gw_port['id']),
[self._port_first_ip_cidr(external_gw_port)],
external_gw_port['mac_address'],
namespace=fip_ns_name)
self.assertTrue(fg_port_created_successfully)
# Check fpr-router device has been created
device_name = fip_ns.get_int_device_name(router.router_id)
fpr_router_device_created_successfully = ip_lib.device_exists(
device_name, namespace=fip_ns_name)
self.assertTrue(fpr_router_device_created_successfully)
# In the router namespace
# Check rfp-<router-id> is created correctly
for fip in floating_ips:
device_name = fip_ns.get_rtr_ext_device_name(router.router_id)
self.assertTrue(ip_lib.device_exists(
device_name, namespace=router.ns_name))
def test_dvr_router_rem_fips_on_restarted_agent(self):
self.agent.conf.agent_mode = 'dvr_snat'
router_info = self.generate_dvr_router_info()
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(fip_ns))
restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport(
self.agent.host, self.agent.conf)
router1.router[l3_constants.FLOATINGIP_KEY] = []
self.manage_router(restarted_agent, router1.router)
self._assert_dvr_snat_gateway(router1)
self.assertTrue(self._namespace_exists(fip_ns))
def test_dvr_router_add_fips_on_restarted_agent(self):
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
router = self.manage_router(self.agent, router_info)
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
router_ns = router.ns_name
fip_rule_prio_1 = self._get_fixed_ip_rule_priority(
router_ns, floating_ips[0]['fixed_ip_address'])
restarted_agent = neutron_l3_agent.L3NATAgent(
self.agent.host, self.agent.conf)
floating_ips[0]['floating_ip_address'] = '21.4.4.2'
floating_ips[0]['fixed_ip_address'] = '10.0.0.2'
self.manage_router(restarted_agent, router_info)
fip_rule_prio_2 = self._get_fixed_ip_rule_priority(
router_ns, floating_ips[0]['fixed_ip_address'])
self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2)
def _get_fixed_ip_rule_priority(self, namespace, fip):
iprule = ip_lib.IPRule(namespace)
lines = iprule.rule._as_root([4], ['show']).splitlines()
for line in lines:
if fip in line:
info = iprule.rule._parse_line(4, line)
return info['priority']
def test_dvr_router_add_internal_network_set_arp_cache(self):
# Check that, when the router is set up and there are
# existing ports on the uplinked subnet, the ARP
# cache is properly populated.
self.agent.conf.agent_mode = 'dvr_snat'
router_info = l3_test_common.prepare_router_data()
router_info['distributed'] = True
expected_neighbor = '35.4.1.10'
port_data = {
'fixed_ips': [{'ip_address': expected_neighbor}],
'mac_address': 'fa:3e:aa:bb:cc:dd',
'device_owner': DEVICE_OWNER_COMPUTE
}
self.agent.plugin_rpc.get_ports_by_subnet.return_value = [port_data]
router1 = self.manage_router(self.agent, router_info)
internal_device = router1.get_internal_device_name(
router_info['_interfaces'][0]['id'])
neighbors = ip_lib.IPDevice(internal_device, router1.ns_name).neigh
self.assertEqual(expected_neighbor,
neighbors.show(ip_version=4).split()[0])
def _assert_rfp_fpr_mtu(self, router, expected_mtu=1500):
dev_mtu = self.get_device_mtu(
router.router_id, router.fip_ns.get_rtr_ext_device_name,
router.ns_name)
self.assertEqual(expected_mtu, dev_mtu)
dev_mtu = self.get_device_mtu(
router.router_id, router.fip_ns.get_int_device_name,
router.fip_ns.get_name())
self.assertEqual(expected_mtu, dev_mtu)
def test_dvr_router_fip_agent_mismatch(self):
"""Test to validate the floatingip agent mismatch.
        This test validates the condition where the floating IP agent
        gateway port's host does not match the agent, so no binding
        exists.
"""
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
floating_ip = router_info['_floatingips'][0]
floating_ip['host'] = 'my_new_host'
# In this case the floatingip binding is different and so it
# should not create the floatingip namespace on the given agent.
# This is also like there is no current binding.
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(router1.ns_name))
self.assertFalse(self._namespace_exists(fip_ns))
self._assert_snat_namespace_does_not_exist(router1)
def test_dvr_router_fip_late_binding(self):
"""Test to validate the floatingip migration or latebinding.
        This test validates the condition where the floating IP's private
        port changes during migration, or where the private port's host
        binding is done only after the floating IP association.
"""
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
fip_agent_gw_port = router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY]
# Now let us not pass the FLOATINGIP_AGENT_INTF_KEY, to emulate
# that the server did not create the port, since there was no valid
# host binding.
router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = []
self.mock_plugin_api.get_agent_gateway_port.return_value = (
fip_agent_gw_port[0])
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(router1.ns_name))
self.assertTrue(self._namespace_exists(fip_ns))
self._assert_snat_namespace_does_not_exist(router1)
def _assert_snat_namespace_exists(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self.assertTrue(self._namespace_exists(namespace))
def _get_dvr_snat_namespace_device_status(
self, router, internal_dev_name=None):
"""Function returns the internal and external device status."""
snat_ns = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
qg_device_created_successfully = ip_lib.device_exists(
external_device_name, namespace=snat_ns)
sg_device_created_successfully = ip_lib.device_exists(
internal_dev_name, namespace=snat_ns)
return qg_device_created_successfully, sg_device_created_successfully
def test_dvr_router_snat_namespace_with_interface_remove(self):
"""Test to validate the snat namespace with interface remove.
This test validates the snat namespace for all the external
        and internal devices. It also validates that the internal
device corresponding to the router interface is removed
when the router interface is deleted.
"""
self.agent.conf.agent_mode = 'dvr_snat'
router_info = self.generate_dvr_router_info()
snat_internal_port = router_info[l3_constants.SNAT_ROUTER_INTF_KEY]
router1 = self.manage_router(self.agent, router_info)
csnat_internal_port = (
router1.router[l3_constants.SNAT_ROUTER_INTF_KEY])
# Now save the internal device name to verify later
internal_device_name = router1._get_snat_int_device_name(
csnat_internal_port[0]['id'])
self._assert_snat_namespace_exists(router1)
qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
router1, internal_dev_name=internal_device_name)
self.assertTrue(qg_device)
self.assertTrue(sg_device)
self.assertEqual(router1.snat_ports, snat_internal_port)
# Now let us not pass INTERFACE_KEY, to emulate
# the interface has been removed.
router1.router[l3_constants.INTERFACE_KEY] = []
# Now let us not pass the SNAT_ROUTER_INTF_KEY, to emulate
# that the server did not send it, since the interface has been
# removed.
router1.router[l3_constants.SNAT_ROUTER_INTF_KEY] = []
self.agent._process_updated_router(router1.router)
router_updated = self.agent.router_info[router_info['id']]
self._assert_snat_namespace_exists(router_updated)
qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
router_updated, internal_dev_name=internal_device_name)
self.assertFalse(sg_device)
self.assertTrue(qg_device)
def _mocked_dvr_ha_router(self, agent):
r_info = self.generate_dvr_router_info(enable_ha=True,
enable_snat=True,
agent=agent)
r_snat_ns_name = namespaces.build_ns_name(dvr_snat_ns.SNAT_NS_PREFIX,
r_info['id'])
mocked_r_snat_ns_name = r_snat_ns_name + '@' + agent.host
r_ns_name = namespaces.build_ns_name(namespaces.NS_PREFIX,
r_info['id'])
mocked_r_ns_name = r_ns_name + '@' + agent.host
return r_info, mocked_r_ns_name, mocked_r_snat_ns_name
def _setup_dvr_ha_agents(self):
self.agent.conf.agent_mode = 'dvr_snat'
conf = self._configure_agent('agent2')
self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport(
'agent2', conf)
self.failover_agent.conf.agent_mode = 'dvr_snat'
def _setup_dvr_ha_bridges(self):
br_int_1 = self._get_agent_ovs_integration_bridge(self.agent)
br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent)
veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports
br_int_1.add_port(veth1.name)
br_int_2.add_port(veth2.name)
def _create_dvr_ha_router(self, agent):
get_ns_name = mock.patch.object(namespaces.RouterNamespace,
'_get_ns_name').start()
get_snat_ns_name = mock.patch.object(dvr_snat_ns.SnatNamespace,
'get_snat_ns_name').start()
(r_info,
mocked_r_ns_name,
mocked_r_snat_ns_name) = self._mocked_dvr_ha_router(agent)
get_ns_name.return_value = mocked_r_ns_name
get_snat_ns_name.return_value = mocked_r_snat_ns_name
router = self.manage_router(agent, r_info)
return router
def _assert_ip_addresses_in_dvr_ha_snat_namespace(self, router):
namespace = router.ha_namespace
ex_gw_port = router.get_ex_gw_port()
snat_port = router.get_snat_interfaces()[0]
ex_gw_port_name = router.get_external_device_name(
ex_gw_port['id'])
snat_port_name = router._get_snat_int_device_name(
snat_port['id'])
ip = ex_gw_port["fixed_ips"][0]['ip_address']
prefix_len = ex_gw_port["fixed_ips"][0]['prefixlen']
ex_gw_port_cidr = ip + "/" + str(prefix_len)
ip = snat_port["fixed_ips"][0]['ip_address']
prefix_len = snat_port["fixed_ips"][0]['prefixlen']
snat_port_cidr = ip + "/" + str(prefix_len)
self._assert_ip_address_on_interface(namespace,
ex_gw_port_name,
ex_gw_port_cidr)
self._assert_ip_address_on_interface(namespace,
snat_port_name,
snat_port_cidr)
def _assert_no_ip_addresses_in_dvr_ha_snat_namespace(self, router):
namespace = router.ha_namespace
ex_gw_port = router.get_ex_gw_port()
snat_port = router.get_snat_interfaces()[0]
ex_gw_port_name = router.get_external_device_name(
ex_gw_port['id'])
snat_port_name = router._get_snat_int_device_name(
snat_port['id'])
self._assert_no_ip_addresses_on_interface(namespace,
snat_port_name)
self._assert_no_ip_addresses_on_interface(namespace,
ex_gw_port_name)
def test_dvr_ha_router_failover(self):
self._setup_dvr_ha_agents()
self._setup_dvr_ha_bridges()
router1 = self._create_dvr_ha_router(self.agent)
router2 = self._create_dvr_ha_router(self.failover_agent)
utils.wait_until_true(lambda: router1.ha_state == 'master')
utils.wait_until_true(lambda: router2.ha_state == 'backup')
self._assert_ip_addresses_in_dvr_ha_snat_namespace(router1)
self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router2)
self.fail_ha_router(router1)
utils.wait_until_true(lambda: router2.ha_state == 'master')
utils.wait_until_true(lambda: router1.ha_state == 'backup')
self._assert_ip_addresses_in_dvr_ha_snat_namespace(router2)
self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router1)
def _assert_fip_namespace_deleted(self, ext_gateway_port):
ext_net_id = ext_gateway_port['network_id']
self.agent.fipnamespace_delete_on_ext_net(
self.agent.context, ext_net_id)
self._assert_interfaces_deleted_from_ovs()
| apache-2.0 | -3,600,914,941,048,306,700 | 45.850863 | 79 | 0.604276 | false |
semkiv/heppy_fcc | analyzers/CMSReader.py | 1 | 1582 | from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from heppy_fcc.particles.cms.particle import Particle
import math
class CMSReader(Analyzer):
def declareHandles(self):
super(CMSReader, self).declareHandles()
self.handles['gen_particles'] = AutoHandle(
self.cfg_ana.gen_particles,
'std::vector<reco::GenParticle>'
)
self.read_pf = self.cfg_ana.pf_particles is not None
if self.read_pf:
self.handles['pf_particles'] = AutoHandle(
self.cfg_ana.pf_particles,
'std::vector<reco::PFCandidate>'
)
def process(self, event):
self.readCollections(event.input)
store = event.input
genp = self.handles['gen_particles'].product()
gen_particles = map(Particle, genp)
event.gen_particles = sorted( gen_particles,
key = lambda ptc: ptc.e(), reverse=True )
event.gen_particles_stable = [ptc for ptc in event.gen_particles
if ptc.status()==1 and
not math.isnan(ptc.e()) and
ptc.e()>1e-5 and
ptc.pt()>1e-5 and
not abs(ptc.pdgid()) in [12, 14, 16]]
if self.read_pf:
pfp = self.handles['pf_particles'].product()
event.pf_particles = map(Particle, pfp)
| gpl-3.0 | -7,577,250,021,358,035,000 | 40.631579 | 81 | 0.53287 | false |
 DedMemez/ODS-August-2017 | makeatoon/ClothesGUI.py | 1 | 10449 | # toontown.makeatoon.ClothesGUI
from toontown.toon import ToonDNA
from direct.fsm import StateData
from direct.gui.DirectGui import *
from MakeAToonGlobals import *
from toontown.toonbase import TTLocalizer
from direct.directnotify import DirectNotifyGlobal
import ShuffleButton
import random
CLOTHES_MAKETOON = 0
CLOTHES_TAILOR = 1
CLOTHES_CLOSET = 2
class ClothesGUI(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('ClothesGUI')
def __init__(self, type, doneEvent, swapEvent = None):
StateData.StateData.__init__(self, doneEvent)
self.type = type
self.toon = None
self.swapEvent = swapEvent
self.gender = '?'
self.girlInShorts = 0
self.swappedTorso = 0
return
def load(self):
self.matGui = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
guiRArrowUp = self.matGui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowRollover = self.matGui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowDown = self.matGui.find('**/tt_t_gui_mat_arrowDown')
guiRArrowDisabled = self.matGui.find('**/tt_t_gui_mat_arrowDisabled')
self.shuffleFrame = self.matGui.find('**/tt_t_gui_mat_shuffleFrame')
shuffleArrowUp = self.matGui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDown = self.matGui.find('**/tt_t_gui_mat_shuffleArrowDown')
shuffleArrowRollover = self.matGui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDisabled = self.matGui.find('**/tt_t_gui_mat_shuffleArrowDisabled')
self.parentFrame = DirectFrame(relief=DGG.RAISED, pos=(0.98, 0, 0.416), frameColor=(1, 0, 0, 0))
self.parentFrame.setPos(-0.36, 0, -0.5)
self.parentFrame.reparentTo(base.a2dTopRight)
self.shirtFrame = DirectFrame(parent=self.parentFrame, image=self.shuffleFrame, image_scale=halfButtonInvertScale, relief=None, pos=(0, 0, -0.4), hpr=(0, 0, 3), scale=1.2, frameColor=(1, 1, 1, 1), text=TTLocalizer.ClothesShopShirt, text_scale=0.0575, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.topLButton = DirectButton(parent=self.shirtFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.swapTop, extraArgs=[-1])
self.topRButton = DirectButton(parent=self.shirtFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.swapTop, extraArgs=[1])
self.bottomFrame = DirectFrame(parent=self.parentFrame, image=self.shuffleFrame, image_scale=halfButtonInvertScale, relief=None, pos=(0, 0, -0.65), hpr=(0, 0, -2), scale=1.2, frameColor=(1, 1, 1, 1), text=TTLocalizer.ColorShopToon, text_scale=0.0575, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.bottomLButton = DirectButton(parent=self.bottomFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.swapBottom, extraArgs=[-1])
self.bottomRButton = DirectButton(parent=self.bottomFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.swapBottom, extraArgs=[1])
self.parentFrame.hide()
self.shuffleFetchMsg = 'ClothesShopShuffle'
self.shuffleButton = ShuffleButton.ShuffleButton(self, self.shuffleFetchMsg)
return
def unload(self):
self.matGui.removeNode()
del self.matGui
self.parentFrame.destroy()
self.shirtFrame.destroy()
self.bottomFrame.destroy()
self.topLButton.destroy()
self.topRButton.destroy()
self.bottomLButton.destroy()
self.bottomRButton.destroy()
del self.shuffleFrame
del self.parentFrame
del self.shirtFrame
del self.bottomFrame
del self.topLButton
del self.topRButton
del self.bottomLButton
del self.bottomRButton
self.shuffleButton.unload()
self.ignore('MAT-newToonCreated')
def showButtons(self):
self.parentFrame.show()
def hideButtons(self):
self.parentFrame.hide()
def enter(self, toon):
self.notify.debug('enter')
base.disableMouse()
self.toon = toon
self.setupScrollInterface()
if not self.type == CLOTHES_TAILOR:
currTop = (self.toon.style.topTex,
self.toon.style.topTexColor,
self.toon.style.sleeveTex,
self.toon.style.sleeveTexColor)
currTopIndex = self.tops.index(currTop)
self.swapTop(currTopIndex - self.topChoice)
currBottom = (self.toon.style.botTex, self.toon.style.botTexColor)
currBottomIndex = self.bottoms.index(currBottom)
self.swapBottom(currBottomIndex - self.bottomChoice)
choicePool = [self.tops, self.bottoms]
self.shuffleButton.setChoicePool(choicePool)
self.accept(self.shuffleFetchMsg, self.changeClothes)
self.acceptOnce('MAT-newToonCreated', self.shuffleButton.cleanHistory)
def exit(self):
try:
del self.toon
except:
self.notify.warning('ClothesGUI: toon not found')
self.hideButtons()
self.ignore('enter')
self.ignore('next')
self.ignore('last')
self.ignore(self.shuffleFetchMsg)
def setupButtons(self):
self.girlInShorts = 0
if self.gender == 'f':
if self.bottomChoice == -1:
botTex = self.bottoms[0][0]
else:
botTex = self.bottoms[self.bottomChoice][0]
if ToonDNA.GirlBottoms[botTex][1] == ToonDNA.SHORTS:
self.girlInShorts = 1
if self.toon.style.getGender() == 'm':
self.bottomFrame['text'] = TTLocalizer.ClothesShopShorts
else:
self.bottomFrame['text'] = TTLocalizer.ClothesShopBottoms
self.acceptOnce('last', self.__handleBackward)
self.acceptOnce('next', self.__handleForward)
return None
def swapTop(self, offset):
length = len(self.tops)
self.topChoice += offset
if self.topChoice <= 0:
self.topChoice = 0
self.updateScrollButtons(self.topChoice, length, 0, self.topLButton, self.topRButton)
if self.topChoice < 0 or self.topChoice >= len(self.tops) or len(self.tops[self.topChoice]) != 4:
self.notify.warning('topChoice index is out of range!')
return
else:
self.toon.style.topTex = self.tops[self.topChoice][0]
self.toon.style.topTexColor = self.tops[self.topChoice][1]
self.toon.style.sleeveTex = self.tops[self.topChoice][2]
self.toon.style.sleeveTexColor = self.tops[self.topChoice][3]
self.toon.generateToonClothes()
if self.swapEvent != None:
messenger.send(self.swapEvent)
messenger.send('wakeup')
return
def swapBottom(self, offset):
length = len(self.bottoms)
self.bottomChoice += offset
if self.bottomChoice <= 0:
self.bottomChoice = 0
self.updateScrollButtons(self.bottomChoice, length, 0, self.bottomLButton, self.bottomRButton)
if self.bottomChoice < 0 or self.bottomChoice >= len(self.bottoms) or len(self.bottoms[self.bottomChoice]) != 2:
self.notify.warning('bottomChoice index is out of range!')
return
else:
self.toon.style.botTex = self.bottoms[self.bottomChoice][0]
self.toon.style.botTexColor = self.bottoms[self.bottomChoice][1]
if self.toon.generateToonClothes() == 1:
self.toon.loop('neutral', 0)
self.swappedTorso = 1
if self.swapEvent != None:
messenger.send(self.swapEvent)
messenger.send('wakeup')
return
def updateScrollButtons(self, choice, length, startTex, lButton, rButton):
if choice >= length - 1:
rButton['state'] = DGG.DISABLED
else:
rButton['state'] = DGG.NORMAL
if choice <= 0:
lButton['state'] = DGG.DISABLED
else:
lButton['state'] = DGG.NORMAL
def __handleForward(self):
self.doneStatus = 'next'
messenger.send(self.doneEvent)
def __handleBackward(self):
self.doneStatus = 'last'
messenger.send(self.doneEvent)
def resetClothes(self, style):
if self.toon:
self.toon.style.makeFromNetString(style.makeNetString())
if self.swapEvent != None and self.swappedTorso == 1:
self.toon.swapToonTorso(self.toon.style.torso, genClothes=0)
self.toon.generateToonClothes()
self.toon.loop('neutral', 0)
return
def changeClothes(self):
self.notify.debug('Entering changeClothes')
newChoice = self.shuffleButton.getCurrChoice()
if newChoice[0] in self.tops:
newTopIndex = self.tops.index(newChoice[0])
else:
newTopIndex = self.topChoice
if newChoice[1] in self.bottoms:
newBottomIndex = self.bottoms.index(newChoice[1])
else:
newBottomIndex = self.bottomChoice
oldTopIndex = self.topChoice
oldBottomIndex = self.bottomChoice
self.swapTop(newTopIndex - oldTopIndex)
self.swapBottom(newBottomIndex - oldBottomIndex)
def getCurrToonSetting(self):
return [self.tops[self.topChoice], self.bottoms[self.bottomChoice]] | apache-2.0 | -5,630,352,317,131,774,000 | 44.65625 | 307 | 0.633266 | false |
basho/riak-python-client | riak/tests/test_misc.py | 1 | 1659 | # Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class MiscTests(unittest.TestCase):
def test_timeout_validation(self):
from riak.client.operations import _validate_timeout
# valid cases
try:
_validate_timeout(None)
_validate_timeout(None, infinity_ok=True)
_validate_timeout('infinity', infinity_ok=True)
_validate_timeout(1234)
_validate_timeout(1234567898765432123456789)
except ValueError:
self.fail('_validate_timeout() unexpectedly raised ValueError')
# invalid cases
with self.assertRaises(ValueError):
_validate_timeout('infinity')
with self.assertRaises(ValueError):
_validate_timeout('infinity-foo')
with self.assertRaises(ValueError):
_validate_timeout('foobarbaz')
with self.assertRaises(ValueError):
_validate_timeout('1234')
with self.assertRaises(ValueError):
_validate_timeout(0)
with self.assertRaises(ValueError):
_validate_timeout(12.34)
| apache-2.0 | 4,406,450,344,882,683,400 | 38.5 | 75 | 0.675105 | false |
nttcom/eclcli | eclcli/network/v2/public_ip.py | 1 | 5947 | from eclcli.common import command
from eclcli.common import utils
from ..networkclient.common import utils as to_obj
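# Command classes (Lister/ShowOne/Command) for the public IP resource:
# list, show, create, update and delete operations against the network client.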
class ListPubicIP(command.Lister):
def get_parser(self, prog_name):
parser = super(ListPubicIP, self).get_parser(prog_name)
parser.add_argument(
'--name',
metavar="name",
help="filter by name")
parser.add_argument(
'--id',
metavar="id",
help="filter by id")
parser.add_argument(
'--status',
metavar="status",
help="filter by status")
parser.add_argument(
'--internet_gw_id',
metavar="internet_gw_id",
help="filter by internet gateway id")
parser.add_argument(
'--submask_length',
metavar="submask_length",
help="filter by submask length")
parser.add_argument(
'--cidr',
metavar="cidr",
help="filter by cidr")
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
columns = (
'id',
'name',
'cidr',
'submask_length',
'status',
)
column_headers = (
'ID',
'Name',
'CIDR',
'Submask',
'Status',
)
search_opts = {}
if parsed_args.name:
search_opts.update({"name": parsed_args.name})
if parsed_args.id:
search_opts.update({"id": parsed_args.id})
if parsed_args.status:
search_opts.update({"status": parsed_args.status})
if parsed_args.internet_gw_id:
search_opts.update({"internet_gw_id": parsed_args.internet_gw_id})
if parsed_args.submask_length:
search_opts.update({"submask_length": parsed_args.submask_length})
if parsed_args.cidr:
search_opts.update({"cidr": parsed_args.cidr})
data = [to_obj.PubicIP(public_ip)
for public_ip in network_client.list_public_ips(**search_opts).get('public_ips')]
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
class ShowPubicIP(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowPubicIP, self).get_parser(prog_name)
parser.add_argument(
'public_ip_id',
metavar="PUBLIC_IP_ID",
            help="ID of Public IP to show."
)
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
public_ip_id = parsed_args.public_ip_id
dic = network_client.show_public_ip(public_ip_id).get('public_ip')
columns = utils.get_columns(dic)
obj = to_obj.PubicIP(dic)
data = utils.get_item_properties(
obj, columns,)
return columns, data
class CreatePubicIP(command.ShowOne):
def get_parser(self, prog_name):
parser = super(CreatePubicIP, self).get_parser(prog_name)
parser.add_argument(
'--name',
metavar='<string>',
help='Name of public ip to create.')
parser.add_argument(
'--description',
metavar='<string>',
help='Description of public ip to create.')
parser.add_argument(
'--internet_gw_id',
metavar='INTERNET_GATEWAY_ID',
required=True,
help='Internet Gateway ID of public ip to create')
parser.add_argument(
'--submask_length',
metavar='SUBNET_MASK_LENGTH',
required=True,
type=int,
help='Submask length of public ip to create.')
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
body = {'public_ip': {}}
utils.update_dict(
parsed_args,
body['public_ip'],
['name', 'description',
'internet_gw_id', 'submask_length'])
dic = network_client.create_public_ip(body).get('public_ip')
columns = utils.get_columns(dic)
obj = to_obj.PubicIP(dic)
data = utils.get_item_properties(
obj, columns,)
return columns, data
class SetPubicIP(command.ShowOne):
def get_parser(self, prog_name):
parser = super(SetPubicIP, self).get_parser(prog_name)
parser.add_argument(
'public_ip_id',
metavar='PUBLIC_IP_ID',
help='ID of Public IP to update.')
parser.add_argument(
'--description',
metavar='<string>',
help='Description of public ip to update.')
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
body = {'public_ip': {}}
public_ip_id = parsed_args.public_ip_id
utils.update_dict(
parsed_args,
body['public_ip'],
['description', ])
dic = network_client.update_public_ip(
public_ip_id, body).get('public_ip')
columns = utils.get_columns(dic)
obj = to_obj.PubicIP(dic)
data = utils.get_item_properties(
obj, columns,)
return columns, data
class DeletePubicIP(command.Command):
def get_parser(self, prog_name):
parser = super(DeletePubicIP, self).get_parser(prog_name)
parser.add_argument(
'public_ip_id',
metavar="PUBLIC_IP_ID",
nargs="+",
help="ID(s) of Public IP to delete."
)
return parser
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
for giid in parsed_args.public_ip_id:
network_client.delete_public_ip(giid)
| apache-2.0 | -1,071,310,958,543,056,000 | 30.3 | 97 | 0.543972 | false |
merll/docker-map | dockermap/exceptions.py | 1 | 2281 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import traceback
import six
class SourceExceptionMixin(object):
def __init__(self, src_exc, *args, **kwargs):
self._src_exc = src_exc
super(SourceExceptionMixin, self).__init__(*args, **kwargs)
@property
def source_exception(self):
"""
The original exception information from where the error occurred. Tuple of exception type and exception
instance (e.g. the output of ``sys.exc_info()``).
:return: Exception tuple.
:rtype: tuple
"""
return self._src_exc
@property
def source_message(self):
"""
Formatted output of the exception, without the stack trace.
:return: Exception text.
:rtype: unicode | str
"""
return ''.join(traceback.format_exception_only(self._src_exc[0], self._src_exc[1]))
def reraise(self):
"""
Utility method for re-raising the original exception, including output of its stacktrace.
"""
six.reraise(*self._src_exc)
class PartialResultsMixin(object):
def __init__(self, partial_results, *args, **kwargs):
self._results = partial_results
super(PartialResultsMixin, self).__init__(*args, **kwargs)
@property
def results(self):
"""
Partial results before the exception occurred.
:return: list
"""
return self._results
@six.python_2_unicode_compatible
class PartialResultsError(SourceExceptionMixin, PartialResultsMixin, Exception):
"""
Exception where partial results might be available.
"""
def __str__(self):
return self.source_message
@six.python_2_unicode_compatible
class DockerStatusError(Exception):
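    # Raised for a Docker status/error response; stores the message plus any extra detail payload.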
    def __init__(self, message, detail):
        self._message = message
        self._detail = None
        if isinstance(detail, dict):
            detail.pop('message', None)
            if detail:
                self._detail = detail
        elif detail:
            self._detail = detail
@property
def message(self):
return self._message
@property
def detail(self):
return self._detail
def __str__(self):
return self._message
| mit | 5,798,657,893,036,697,000 | 24.920455 | 111 | 0.600614 | false |
qedsoftware/commcare-hq | corehq/ex-submodules/casexml/apps/phone/management/commands/sync_log_debugger.py | 1 | 6017 | import json
from optparse import make_option
import os
from django.core.management import BaseCommand
import sys
from casexml.apps.phone.checksum import Checksum
class Command(BaseCommand):
"""Compare sync logs to see mismatched case ids.
This is useful for debugging state mismatches
    You can also pass in a hash to see which cases HQ is sending down that the mobile device has purged.
Usage:
./manage.py sync_log_debugger <synclog1> [synclog2 synclog3]...
<synclog> is a json file of the synclog you are trying to compare. This
could also be a folder of synclogs and will compare all of them.
optional arguments:
--check <hash>
removes cases one by one in synclog1 until it matches <hash>
--index <index>
if you have more than one file passed in, <index> is the one that --check will be run on
--depth <depth>
specify the number of cases to try removing until you find a match in --check
(it's a N choose X problem so gets very slow after --depth > 1 if you
have a significant number of cases in the log)
"""
option_list = BaseCommand.option_list + (
make_option('--debugger',
action='store_true',
dest='debugger',
default=False,
help="Drop into a debugger at the end of running the command for manual queries"),
make_option('--check',
action='store',
dest='check_hash',
                    default=None,
                    help=("Run a hash check. Removes cases one by one from the passed-in synclog until "
                          "the hash matches CHECK_HASH")),
make_option('--index',
action='store',
dest='index',
default=0,
help=("if you have more than one file passed in, <index> is the one "
"that --check will be run on")),
make_option('--depth',
action='store',
dest='depth',
default=1,
help=("specify the number of cases to try removing until you find a match in --check"
"(it's a N choose X problem so gets very slow after --depth > 1 if you"
"have a significant number of cases in the log)\n")),
)
def handle(self, *args, **options):
from casexml.apps.phone.models import properly_wrap_sync_log, SyncLog, SimplifiedSyncLog
if len(args) < 1:
print(
"Usage:\n"
"./manage.py sync_log_debugger <synclog1> [synclog2 synclog3]...\n"
" <synclog> is a json file of the synclog you are trying to compare. Passing\n"
" in a folder will compare all of the files in that folder.\n"
)
sys.exit(0)
logs = []
log_names = []
for filename in args:
if os.path.isdir(filename):
filenames = [os.path.join(filename, item) for item in sorted(os.listdir(filename))]
else:
filenames = [filename]
for filename in filenames:
log_name = os.path.basename(filename)
log_names.append(log_name)
with open(filename) as f:
wrapped_log = properly_wrap_sync_log(json.loads(f.read()))
logs.append(wrapped_log)
if isinstance(wrapped_log, SyncLog):
log_names.append('migrated-{}'.format(log_name))
logs.append(SimplifiedSyncLog.from_other_format(wrapped_log))
elif getattr(wrapped_log, 'migrated_from', None):
log_names.append('migrated_from-{}'.format(log_name))
logs.append(properly_wrap_sync_log(wrapped_log.to_json()['migrated_from']))
print 'state hashes'
for i in range(len(log_names)):
print '{} ({}): {}'.format(log_names[i], logs[i]._id, logs[i].get_state_hash())
print '\ncase diffs'
for i in range(len(log_names)):
for j in range(len(log_names)):
if i != j:
case_diff = set(logs[i].get_footprint_of_cases_on_phone()) - \
set(logs[j].get_footprint_of_cases_on_phone())
if case_diff:
print 'cases on {} and not {}: {}'.format(
log_names[i],
log_names[j],
', '.join(sorted(case_diff))
)
if options['debugger']:
union_of_ids = set().union(*[set(log.get_footprint_of_cases_on_phone()) for log in logs])
intersection_of_ids = set().intersection(*[set(log.get_footprint_of_cases_on_phone()) for log in logs])
import pdb
pdb.set_trace()
if options['check_hash']:
log_to_check = logs[int(options['index'])]
result = _brute_force_search(
log_to_check.case_ids_on_phone, options['check_hash'], depth=int(options['depth'])
)
if result:
print 'check successful - missing ids {}'.format(result)
else:
print 'no match found'
def _brute_force_search(case_id_set, expected_hash, diff=None, depth=1):
# utility for brute force searching for a hash
diff = diff or set()
if _get_hash(case_id_set) == expected_hash:
return diff
else:
if depth > 0:
for id in case_id_set:
list_to_check = case_id_set - set([id])
newdiff = diff | set([id])
result = _brute_force_search(list_to_check, expected_hash, newdiff, depth-1)
if result:
return result
else:
return None
def _get_hash(ids):
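    # compute the state hash over the given case ids (used to compare against --check)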
return Checksum(list(ids)).hexdigest()
| bsd-3-clause | 6,120,540,430,923,160,000 | 41.373239 | 115 | 0.531328 | false |
EmreAtes/spack | var/spack/repos/builtin/packages/py-psyclone/package.py | 1 | 2683 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class PyPsyclone(PythonPackage):
"""Code generation for the PSyKAl framework from the GungHo project,
as used by the LFRic model at the UK Met Office."""
homepage = "https://github.com/stfc/PSyclone"
url = "https://github.com/stfc/PSyclone/archive/1.5.1.tar.gz"
giturl = "https://github.com/stfc/PSyclone.git"
version('1.5.1', git=giturl,
commit='eba7a097175b02f75dec70616cf267b7b3170d78')
version('develop', git=giturl, branch='master')
depends_on('py-setuptools', type='build')
depends_on('python', type=('build', 'run'))
depends_on('py-pyparsing', type=('build', 'run'))
# Test cases fail without compatible versions of py-fparser:
depends_on('[email protected]', type=('build', 'run'), when='@1.5.1')
depends_on('py-fparser', type=('build', 'run'), when='@1.5.2:')
# Dependencies only required for tests:
depends_on('py-numpy', type='test')
depends_on('py-nose', type='test')
depends_on('py-pytest', type='test')
@run_after('install')
@on_package_attributes(run_tests=True)
def check_build(self):
# Limit py.test to search inside the build tree:
touch('pytest.ini')
with working_dir('src'):
Executable('py.test')()
def setup_environment(self, spack_env, run_env):
# Allow testing with installed executables:
spack_env.prepend_path('PATH', self.prefix.bin)
| lgpl-2.1 | 6,283,712,854,435,846,000 | 40.276923 | 78 | 0.653 | false |
GeosoftInc/gxpy | geosoft/gxapi/GXE3DV.py | 1 | 2424 | ### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
from .GXMVIEW import GXMVIEW
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXE3DV(gxapi_cy.WrapE3DV):
"""
GXE3DV class.
Methods to manipulate an active 3D View
"""
def __init__(self, handle=0):
super(GXE3DV, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXE3DV <geosoft.gxapi.GXE3DV>`
:returns: A null `GXE3DV <geosoft.gxapi.GXE3DV>`
:rtype: GXE3DV
"""
return GXE3DV()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
def get_data_view(self):
"""
Get the current data (3D) `GXMVIEW <geosoft.gxapi.GXMVIEW>`
:returns: `GXMVIEW <geosoft.gxapi.GXMVIEW>` object
:rtype: GXMVIEW
.. versionadded:: 9.3
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._get_data_view()
return GXMVIEW(ret_val)
def get_base_view(self):
"""
Get the current Base `GXMVIEW <geosoft.gxapi.GXMVIEW>` (used to draw 2D legends for groups)
:returns: `GXMVIEW <geosoft.gxapi.GXMVIEW>` object
:rtype: GXMVIEW
.. versionadded:: 9.3
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._get_base_view()
return GXMVIEW(ret_val)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer | bsd-2-clause | -2,210,660,780,140,298,200 | 24 | 135 | 0.62005 | false |
lodemo/CATANA | src/face_recognition/collabCreateMemMap.py | 1 | 4950 | # -*- coding: utf-8 -*-
'''
Get the feature data from the DB, de-pickle it and store it in an array on disk.
'''
# MIT License
#
# Copyright (c) 2017 Moritz Lode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
import os
import time
import numpy as np
import pandas as pa
import cPickle as cp
import json
import math
from threading import Thread
from database import *
from sqlalchemy import exists, and_, func
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.stats import describe
import itertools
import string
import hdbscan
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
import facedist
import networkx as nx
fileDir = os.path.dirname(os.path.realpath(__file__))
from itertools import izip_longest
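# helpers to pad a ragged nested sequence into a rectangular numpy array (missing entries become NaN)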
def find_shape(seq):
try:
len_ = len(seq)
except TypeError:
return ()
shapes = [find_shape(subseq) for subseq in seq]
return (len_,) + tuple(max(sizes) for sizes in izip_longest(*shapes,
fillvalue=1))
def fill_array(arr, seq):
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = np.nan
else:
for subarr, subseq in izip_longest(arr, seq, fillvalue=()):
fill_array(subarr, subseq)
db = YTDatabase()
start = time.time()
with db._session_scope(False) as session:
num_features = session.query(VideoFeatures).filter(VideoFeatures.duration > 30.0).count()
print 'ALL features > 30:', num_features
from sqlalchemy.sql import func
videoIDs = []
with db._session_scope(False) as session:
#vids = session.query(VideoFeatures.videoID).filter(VideoFeatures.duration > 0.0).distinct().all()
vids = session.query(VideoFeatures.videoID).filter(VideoFeatures.duration > 0.0).filter(and_(Video.id==VideoFeatures.videoID, func.date(Video.crawlTimestamp).between('2016-12-28', '2017-03-28'))).distinct().all()
print 'first videos:', len(vids)
for id in vids:
v = session.query(Video).filter(Video.id==id[0]).first()
if not v.deleted and session.query(VideoHistory.id).filter(VideoHistory.videoID==v.id).count() > 11:
videoIDs.append(v.id)
print 'videos', len(videoIDs)
pa.DataFrame(videoIDs).to_csv('videoIDs_filtered.csv')
#videoIDs = pa.read_csv('videoIDs_filtered.csv',index_col=0)['0'].values.tolist()
with db._session_scope(False) as session:
num_features = session.query(VideoFeatures).filter(and_(VideoFeatures.duration > 30.0, VideoFeatures.videoID.in_(videoIDs))).count()
print 'Video features:', num_features
# This does not work, as the features array is not a perfect array, has different row lengths
#mmap = np.memmap(os.path.join(fileDir, 'features_memmap.npy'), mode='w+', dtype=np.double, shape=(num_features, 100, 1792))
n = 0
with db._session_scope(False) as session:
# Get features from database, filter duration on query, above 30 sec.
data = pa.read_sql(session.query(VideoFeatures.feature).filter(and_(VideoFeatures.duration > 30.0, VideoFeatures.videoID.in_(videoIDs))).statement, db.engine)
    # Features are stored cPickled
    data['feature'] = data['feature'].apply(cp.loads)
    # We can transform the array into a perfect (rectangular) array but the memory usage would rise as well
#fixed_data = np.empty( (len(chunk), 100, 1792) )
#fill_array(fixed_data, chunk['feature'].values)
    # Due to the time consumption of the next computation step, we could use only the first 20 features instead of the max. 100
#firstth = np.asarray([f[:20] for f in data['feature'].values])
# Save features to disk
np.save('features_3MONTH', np.asarray(data['feature']))
#mmap[n:n+chunk.shape[0]] = fixed_data
#n += chunk.shape[0]
processTime = time.time() - start
print 'data extraction took', processTime
| mit | -4,439,551,050,796,614,000 | 30.528662 | 216 | 0.702626 | false |
Remi-C/interactive_map_tracking | interactive_map_tracking.py | 1 | 66133 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
interactive_map_tracking
A QGIS plugin
 A QGIS 2.6 plugin to track the user's map view (camera), and/or to auto-commit/refresh edits on a PostGIS vector layer
-------------------
begin : 2015-02-20
git sha : $Format:%H$
copyright : (C) 2015 by Lionel Atty, IGN, SIDT
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt4.QtGui import QAction, QIcon
# Initialize Qt resources from file resources.py
import resources_rc
# Import the code for the dialog
from interactive_map_tracking_dialog import interactive_map_trackingDialog
import os.path
from PyQt4.QtCore import QObject, SIGNAL, QUrl
from PyQt4.QtGui import QAction, QIcon, QTabWidget
from PyQt4.QtWebKit import QWebSettings, QWebView
from qgis.gui import QgsMessageBar
from qgis.core import *
import qgis_layer_tools
import qgis_mapcanvas_tools
import qgis_log_tools
import imt_tools
#
# for beta test purposes
#
from PyQt4.QtCore import QTimer
import Queue
from collections import namedtuple
import time
import threading
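# helper: convert a duration from seconds to milliseconds (QTimer intervals are in ms)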
def CONVERT_S_TO_MS(s):
return s*1000
# with Qt resources files
# gui_doc_about = ":/plugins/interactive_map_tracking/gui_doc/About.htm"
# gui_doc_user_doc = ":/plugins/interactive_map_tracking/gui_doc/Simplified_User_Guide.htm"
# with absolute (os) path
class interactive_map_tracking:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
import time
from PyQt4.QtNetwork import QNetworkProxy
current_time = time.time()
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.normcase(os.path.dirname(__file__))
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'interactive_map_tracking_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = interactive_map_trackingDialog()
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Interactive Map Tracking')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'interactive_map_tracking')
self.toolbar.setObjectName(u'interactive_map_tracking')
# self.selections = []
self.qsettings_prefix_name = "imt/"
        self.bSignalForLayerModifiedConnected = False
        self.bSignalForLayerChangedConnected = False
        self.bSignalForExtentsChangedConnected = False
        self.bSignalForLayerCrsChangedConnected = False
# self.idCameraPositionLayerInBox = 0
self.currentLayerForTrackingPosition = None
self.bSignalForProjectReadConnected = True
QObject.connect(self.iface, SIGNAL("projectRead()"), self.qgisInterfaceProjectRead)
# MUTEX
self.bUseV2Functionnalities = self.dlg.enableUseMutexForTP.isChecked()
# url: https://docs.python.org/2/library/collections.html#collections.namedtuple
# Definition : namedtuples 'type'
self.TP_NAMEDTUPLE_LET = namedtuple('TP_NAMEDTUPLE_LET', ['layer', 'extent', 'w_time'])
self.TP_NAMEDTUPLE_ET = namedtuple('TP_NAMEDTUPLE_ET', ['extent', 'w_time'])
# LIFO Queue to save (in real time) requests for tracking position
self.tp_queue_rt_ntuples_let = Queue.LifoQueue()
# self.tp_rt_ntuples_let = self.TP_NAMEDTUPLE_LET(None, None, current_time)
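        # intermediate buffers of the tracking-position pipeline:
        # real-time requests -> per-layer extents -> features -> layers to commit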
self.tp_dict_key_l_values_et = {}
self.tp_list_fets = []
self.tp_dict_key_l_values_listfeatures = {}
self.tp_dict_layers_to_commit = {}
#
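        # four chained QTimers: record raw requests to memory, build geometries,
        # push them into the layer, then commit the layers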
self.qtimer_tracking_position_rtt_to_memory = QTimer()
self.qtimer_tracking_position_rtt_to_memory.timeout.connect(self.tracking_position_qtimer_rttp_to_memory)
self.qtimer_tracking_position_memory_to_geom = QTimer()
self.qtimer_tracking_position_memory_to_geom.timeout.connect(self.tracking_position_qtimer_memory_to_geom)
self.qtimer_tracking_position_geom_to_layer = QTimer()
self.qtimer_tracking_position_geom_to_layer.timeout.connect(self.tracking_position_qtimer_geom_to_layer)
self.qtimer_tracking_position_layers_to_commit = QTimer()
self.qtimer_tracking_position_layers_to_commit.timeout.connect(self.tracking_position_qtimer_layers_to_commit)
# OPTIONS: timing reactions
#
self.tp_timers = imt_tools.TpTimer()
# TODO : add this options timing on GUI
# in S
        tp_threshold_time_for_realtime_tracking_position = 0.125  # i.e. 8 Hz => (max) 8 tracking positions recorded per second
# in MS
tp_threshold_time_for_tp_to_mem = 250 # add to reference timing: realtime_tracking_position
tp_threshold_time_for_construct_geom = 50 # add to reference timing: tp_to_mem
tp_threshold_time_for_sending_geom_to_layer = 100 # add to reference timing: construct_geom
tp_threshold_time_for_sending_layer_to_dp = 100 # add to reference timing: sending_geom_to_layer
#
self.tp_timers.set_delay("tp_threshold_time_for_realtime_tracking_position",
tp_threshold_time_for_realtime_tracking_position)
self.tp_timers.set_delay("tp_threshold_time_for_tp_to_mem", tp_threshold_time_for_tp_to_mem)
self.tp_timers.set_delay("tp_threshold_time_for_construct_geom", tp_threshold_time_for_construct_geom)
self.tp_timers.set_delay("tp_threshold_time_for_sending_geom_to_layer",
tp_threshold_time_for_sending_geom_to_layer)
self.tp_timers.set_delay("tp_threshold_time_for_sending_layer_to_dp", tp_threshold_time_for_sending_layer_to_dp)
# in S
        delay_time_still_moving = 0.750  # delta time used to decide if the user is still moving on the map
self.tp_timers.set_delay("delay_time_still_moving", delay_time_still_moving)
# for timing
self.tp_time_last_rttp_to_mem = current_time
self.tp_time_last_construct_geom = current_time
self.tp_time_last_send_geom_to_layer = current_time
self.tp_time_last_send_layer_to_dp = current_time
self.tp_queue_qgis_event_to_mem = []
"""
        Delay for the manager of tracking-position requests;
        it can be interesting to evaluate/benchmark the impact of this value
"""
        self.qtimer_tracking_position_delay = self.tp_timers.get_delay(
            "tp_threshold_time_for_realtime_tracking_position") # in ms
# user-id:
        # from the OS user id
os_username = imt_tools.get_os_username()
# try to use IP to identify the user
user_ip = imt_tools.get_lan_ip()
#
self.tp_user_name = os_username + " (" + user_ip + ")"
# default value for threshold scale
self.threshold = 0
self.tp_id_user_id = 0
self.tp_id_w_time = 0
self.values = []
self.bRefreshMapFromAutoSave = False
self.TP_NAMEDTUPLE_WEBVIEW = namedtuple(
'TP_NAMEDTUPLE_WEBVIEW',
['state', 'width', 'height', 'online_url', 'offline_url']
)
        # @FIXME @TODO: self.plugin_dir (set in __init__) is the proper way to get the plugin directory
self.qgis_plugins_directory = self.plugin_dir
self.webview_offline_about = os.path.join(self.qgis_plugins_directory , "gui_doc","About.htm" )
self.webview_offline_user_doc = os.path.join(self.qgis_plugins_directory , "gui_doc", "Simplified_User_Guide.htm" )
self.webview_online_about = "https://github.com/Remi-C/interactive_map_tracking/wiki/[User]-About"
self.webview_online_user_doc = "https://github.com/Remi-C/interactive_map_tracking/wiki/[User]-User-Guide"
self.webview_dict = {}
# url : http://qt-project.org/doc/qt-4.8/qurl.html
self.webview_default_tuple = self.TP_NAMEDTUPLE_WEBVIEW('init', 0, 0, QUrl(""), QUrl(""))
self.webview_dict[self.dlg.webView_userdoc] = self.TP_NAMEDTUPLE_WEBVIEW(
'init',
0, 0,
QUrl(self.webview_online_user_doc),
QUrl(self.webview_offline_user_doc)
)
self.webview_dict[self.dlg.webView_about] = self.TP_NAMEDTUPLE_WEBVIEW(
'init',
0, 0,
QUrl(self.webview_online_about),
QUrl(self.webview_offline_about)
)
self.webview_current = None
self.webview_margin = 60
#getting proxy
s = QSettings() #getting proxy from qgis options settings
proxyEnabled = s.value("proxy/proxyEnabled", "")
proxyType = s.value("proxy/proxyType", "" )
proxyHost = s.value("proxy/proxyHost", "" )
proxyPort = s.value("proxy/proxyPort", "" )
proxyUser = s.value("proxy/proxyUser", "" )
proxyPassword = s.value("proxy/proxyPassword", "" )
if proxyEnabled == "true": # test if there are proxy settings
proxy = QNetworkProxy()
if proxyType == "DefaultProxy":
proxy.setType(QNetworkProxy.DefaultProxy)
elif proxyType == "Socks5Proxy":
proxy.setType(QNetworkProxy.Socks5Proxy)
elif proxyType == "HttpProxy":
proxy.setType(QNetworkProxy.HttpProxy)
elif proxyType == "HttpCachingProxy":
proxy.setType(QNetworkProxy.HttpCachingProxy)
elif proxyType == "FtpCachingProxy":
proxy.setType(QNetworkProxy.FtpCachingProxy)
proxy.setHostName(proxyHost)
proxy.setPort(int(proxyPort))
proxy.setUser(proxyUser)
proxy.setPassword(proxyPassword)
QNetworkProxy.setApplicationProxy(proxy)
self.dict_tabs_size = {}
self.tp_last_extent_saved = QgsRectangle()
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('interactive_map_tracking', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
        qgis_log_tools.logMessageINFO("Launch 'InitGui(...)' ...")
icon_path = ':/plugins/interactive_map_tracking/icon.png'
# icon_path = ':/plugins/interactive_map_tracking/icon_svg.png'
self.add_action(
icon_path,
text=self.tr(u'Tools for Interactive Map Tracking'),
callback=self.run,
parent=self.iface.mainWindow())
#
self.init_plugin()
# Connections
        # activate/deactivate plugin
        self.dlg.enablePlugin.clicked.connect(self.enabled_plugin)
        # activate/deactivate autosave
        self.dlg.enableAutoSave.clicked.connect(self.enabled_autosave)
        # activate/deactivate tracking position
self.dlg.enableTrackPosition.clicked.connect(self.enabled_trackposition)
# box for tracking layers
self.dlg.refreshLayersListForTrackPosition.clicked.connect(self.refreshComboBoxLayers)
QObject.connect(self.dlg.trackingPositionLayerCombo, SIGNAL("currentIndexChanged ( const QString & )"),
self.currentIndexChangedTPLCB)
QObject.connect(self.dlg.IMT_Window_Tabs, SIGNAL("currentChanged (int)"), self.QTabWidget_CurrentChanged)
# Dev Debug
self.dlg.enableLogging.clicked.connect(self.enableLogging)
self.dlg.enableUseMutexForTP.clicked.connect(self.enableUseMutexForTP)
        # hide the plugin window
# don't change the state (options) of the plugin
self.dlg.buttonHide.clicked.connect(self.hide_plugin)
#
self.refreshComboBoxLayers()
self.thresholdChanged()
#
#QgsMessageLog.logMessage("enableLogging()")
self.enableLogging()
self.enableUseMutexForTP()
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Interactive Map Tracking'),
action)
self.iface.removeToolBarIcon(action)
def onResizeEvent(self, event):
# url: http://openclassrooms.com/forum/sujet/dimensionnement-automatique-d-une-qtabwidget
QTabWidget.resizeEvent(self.dlg.IMT_Window_Tabs, event)
# url: http://qt-project.org/doc/qt-4.8/qresizeevent.html
self.dict_tabs_size[self.dlg.IMT_Window_Tabs.currentIndex()] = event.size()
def run(self):
"""Run method that performs all the real work"""
#
# set the icon IMT ^^
icon_path = ':/plugins/interactive_map_tracking/icon.png'
self.dlg.setWindowIcon(QIcon(icon_path))
# set the tab at init
self.dlg.IMT_Window_Tabs.setCurrentIndex(0)
# url: http://qt-project.org/doc/qt-4.8/qtabwidget.html#resizeEvent
self.dlg.IMT_Window_Tabs.resizeEvent = self.onResizeEvent
# show the dialog
self.dlg.show()
#
self.enabled_plugin()
# Run the dialog event loop
self.dlg.exec_()
def init_plugin(self):
""" Init the plugin
        - Set default values in QSettings (note: some values are already set!)
- Setup the GUI
"""
qgis_log_tools.logMessageINFO("Launch 'init_plugin(...)' ...")
s = QSettings()
# retrieve default states from Qt Creator GUI design
self.update_setting(s, "enabledPlugin", self.dlg.enablePlugin)
self.update_setting(s, "enabledAutoSave", self.dlg.enableAutoSave)
self.update_setting(s, "enabledTrackPosition", self.dlg.enableTrackPosition)
self.update_setting(s, "enabledLogging", self.dlg.enableLogging)
self.update_setting(s, "enableV2", self.dlg.enableUseMutexForTP)
self.thresholdChanged()
s.setValue(self.qsettings_prefix_name + "threshold", str(self.threshold))
if s.value(self.qsettings_prefix_name + "enabledPlugin", "") == "true":
            self.update_checkbox(s, "enabledAutoSave", self.dlg.enableAutoSave)
            self.update_checkbox(s, "enabledTrackPosition", self.dlg.enableTrackPosition)
            self.update_checkbox(s, "enabledLogging", self.dlg.enableLogging)
self.update_checkbox(s, "enableV2", self.dlg.enableUseMutexForTP)
#
self.dlg.thresholdLabel.setEnabled(True)
self.dlg.threshold_extent.setEnabled(True)
QObject.connect(self.dlg.threshold_extent, SIGNAL("returnPressed ()"), self.thresholdChanged)
self.thresholdChanged()
else:
#
self.dlg.enableAutoSave.setDisabled(True)
self.dlg.enableTrackPosition.setDisabled(True)
self.dlg.enableLogging.setDisabled(True)
self.dlg.enableUseMutexForTP.setDisabled(True)
self.dlg.thresholdLabel.setDisabled(True)
self.dlg.threshold_extent.setDisabled(True)
#
QObject.disconnect(self.dlg.threshold_extent, SIGNAL("returnPressed ()"), self.thresholdChanged)
QObject.disconnect(self.dlg.webView_about, SIGNAL("loadFinished (bool)"), self.webview_loadFinished)
QObject.disconnect(self.dlg.webView_userdoc, SIGNAL("loadFinished (bool)"), self.webview_loadFinished)
    def update_setting(self, _settings, _name_in_setting, _checkbox):
        """ Store the checked state of a checkbox into the plugin's Qt settings
        :param _settings: QSettings store
        :param _name_in_setting: setting's name (without the plugin prefix)
        :param _checkbox: QCheckBox whose checked state is saved as "true"/"false"
        """
if _checkbox.isChecked():
_settings.setValue(self.qsettings_prefix_name + _name_in_setting, "true")
else:
_settings.setValue(self.qsettings_prefix_name + _name_in_setting, "false")
def update_checkbox(self, _settings, _name_in_setting, _checkbox):
""" According to values stores in QSetting, update the state of a checkbox
:param _settings: (local) Setting from Qt
:type _settings: QSettings
:param _name_in_setting: setting's name for the _checkbox in QSettings
:type _name_in_setting: QString
:param _checkbox: CheckBox to update state
:type _checkbox: QCheckBox
"""
if _settings.value(self.qsettings_prefix_name + _name_in_setting, "") == "true":
_checkbox.setDisabled(False)
_checkbox.setChecked(True)
else:
_checkbox.setChecked(False)
_checkbox.setDisabled(True)
def disconnectSignaleForLayerCrsChanged(self, layer):
""" Disconnect the signal: 'layerCrsChanged' of the layer given
:param layer:
:return:
"""
        if None != layer and self.bSignalForLayerCrsChangedConnected:
QObject.disconnect(layer, SIGNAL("layerCrsChanged()"), self.currentLayerCrsChanged)
self.bSignalForLayerCrsChangedConnected = False
#
qgis_log_tools.logMessageINFO("Disconnect SIGNAL on layer: " + layer.name())
def disconnectSignalForLayerModified(self, layer):
""" Disconnect the signal: 'Layer Modified' of the layer given
:param layer: QGIS Layer
:type layer: QgsMapLayer
"""
if None != layer and self.bSignalForLayerModifiedConnected:
QObject.disconnect(layer, SIGNAL("layerModified()"), self.currentLayerModified)
self.bSignalForLayerModifiedConnected = False
#
qgis_log_tools.logMessageINFO("Disconnect SIGNAL on layer: " + layer.name())
def disconnectSignalForLayerChanged(self):
""" Disconnect the signal: 'Current Layer Changed' of the QGIS Interface"""
#
if self.bSignalForLayerChangedConnected:
QObject.disconnect(self.iface, SIGNAL("currentLayerChanged(QgsMapLayer*)"),
self.qgisInterfaceCurrentLayerChanged)
self.bSignalForLayerChangedConnected = False
#
qgis_log_tools.logMessageINFO("Disconnect SIGNAL on QGISInterface")
def disconnectSignalForExtentsChanged(self):
""" Disconnect the signal: 'Canvas Extents Changed' of the QGIS MapCanvas """
#
if self.bSignalForExtentsChangedConnected:
self.iface.mapCanvas().extentsChanged.disconnect(self.canvasExtentsChanged)
self.bSignalForExtentsChangedConnected = False
#
qgis_log_tools.logMessageINFO("Disconnect SIGNAL on QGISMapCanvas")
    def connectSignaleForLayerCrsChanged(self, layer):
        """ Connect the signal: 'layerCrsChanged' of the layer given
:param layer:
:return:
"""
if None != layer and not self.bSignalForLayerCrsChangedConnected:
QObject.connect(layer, SIGNAL("layerCrsChanged()"), self.currentLayerCrsChanged)
            self.bSignalForLayerCrsChangedConnected = True
#
qgis_log_tools.logMessageINFO("Connect SIGNAL on layer: " + layer.name())
def connectSignalForLayerModified(self, layer):
""" Connect the signal: "Layer Modified" to the layer given
:param layer: QGIS layer
:type layer: QgsMapLayer
"""
if None != layer and not self.bSignalForLayerModifiedConnected:
QObject.connect(layer, SIGNAL("layerModified()"), self.currentLayerModified)
self.bSignalForLayerModifiedConnected = True
#
qgis_log_tools.logMessageINFO("Connect SIGNAL on layer: " + layer.name())
def connectSignalForLayerChanged(self):
""" Connect the signal: 'Layer Changed' to the layer given """
#
if not self.bSignalForLayerChangedConnected:
QObject.connect(self.iface, SIGNAL("currentLayerChanged(QgsMapLayer*)"),
self.qgisInterfaceCurrentLayerChanged)
self.bSignalForLayerChangedConnected = True
#
qgis_log_tools.logMessageINFO("Connect SIGNAL on QGISInterface")
def connectSignalForExtentsChanged(self):
""" Connect the signal: 'Extent Changed' to the QGIS MapCanvas """
#
if not self.bSignalForExtentsChangedConnected:
self.iface.mapCanvas().extentsChanged.connect(self.canvasExtentsChanged)
self.bSignalForExtentsChangedConnected = True
#
qgis_log_tools.logMessageINFO("Connect SIGNAL on QGISMapCanvas")
def disconnectSignals(self, layer):
""" Disconnect alls signals (of current layer & QGIS MapCanvas, Interface) """
#
qgis_log_tools.logMessageINFO("Disconnect all SIGNALS ...")
#
self.disconnectSignalForLayerModified(layer)
self.disconnectSignalForLayerChanged()
self.disconnectSignalForExtentsChanged()
        self.disconnectSignaleForLayerCrsChanged(layer)
#
QObject.disconnect(self.dlg.webView_about, SIGNAL("loadFinished (bool)"), self.webview_loadFinished)
QObject.disconnect(self.dlg.webView_userdoc, SIGNAL("loadFinished (bool)"), self.webview_loadFinished)
def qgisInterfaceCurrentLayerChanged(self, layer):
""" Action when the signal: 'Current Layer Changed' from QGIS MapCanvas is emitted&captured
:param layer: QGIS layer -> current layer using by Interactive_Map_Tracking plugin
:type layer: QgsMapLayer
"""
        # disconnect the current layer
if None != self.currentLayer:
self.disconnectSignalForLayerModified(self.currentLayer)
        # filter: keep only the layers to watch
if not qgis_layer_tools.filter_layer_for_imt(layer):
layer = None
if None != layer:
self.currentLayer = layer
#
if self.dlg.enablePlugin.isChecked():
if self.dlg.enableAutoSave.isChecked():
qgis_layer_tools.commitChangesAndRefresh(self.currentLayer, self.iface, QSettings())
self.connectSignalForLayerModified(self.currentLayer)
qgis_log_tools.logMessageINFO("Change Layer: layer.name=" + layer.name())
else:
qgis_log_tools.logMessageINFO("No layer selected (for ITP)")
def qgisInterfaceProjectRead(self):
""" Action when the signal: 'Project Read' from QGIS Inteface is emitted&captured """
pass
def currentLayerModified(self):
""" Action when the signal: 'Layer Modified' from QGIS Layer (current) is emitted&captured
We connect a new signal: 'RenderComplete' to perform operation after the QGIS rendering (deferred strategy)
"""
#
if None != self.currentLayer:
if None != self.iface.mapCanvas():
QObject.connect(self.iface.mapCanvas(), SIGNAL("renderComplete(QPainter*)"),
self.currentLayerModifiedAndRenderComplete)
qgis_log_tools.logMessageINFO("Detect modification on layer:" + self.currentLayer.name())
def currentLayerModifiedAndRenderComplete(self):
""" Action when the signal: 'Render Complete' from QGIS Layer (current) is emitted&captured (after emitted&captured signal: 'Layer Modified') """
#
QObject.disconnect(self.iface.mapCanvas(), SIGNAL("renderComplete(QPainter*)"),
self.currentLayerModifiedAndRenderComplete)
#
# qgis_layer_tools.bRefreshMapFromAutoSave = True
qgis_layer_tools.commitChangesAndRefresh(self.currentLayer, self.iface, QSettings())
def canvasExtentsChanged(self):
""" Action when the signal: 'Extent Changed' from QGIS MapCanvas is emitted&captured
We connect a new signal: 'RenderComplete' to perform operation after the QGIS rendering (deferred strategy)
"""
if self.bUseV2Functionnalities:
# filter on our dummy refreshMap using little zoom on mapcanvas (=> canvasExtentChanged was emitted)
# if self.bRefreshMapFromAutoSave:
# self.bRefreshMapFromAutoSave = False
# else:
self.update_track_position_with_qtimers()
else:
QObject.connect(self.iface.mapCanvas(), SIGNAL("renderComplete(QPainter*)"),
self.canvasExtentsChangedAndRenderComplete)
def canvasExtentsChangedAndRenderComplete(self):
""" Action when the signal: 'Render Complete' from QGIS MapCanvas is emitted&captured (after a emitted&captured signal: 'Extent Changed')
"""
#
QObject.disconnect(self.iface.mapCanvas(), SIGNAL("renderComplete(QPainter*)"),
self.canvasExtentsChangedAndRenderComplete)
if self.bUseV2Functionnalities:
self.update_track_position_with_qtimers()
else:
self.update_track_position()
def filter_layer_for_tracking_position(layer):
# set Attributes for Layer in DB
        # automatically retrieve the number of fields that make up the features present in this layer
# How to get field names in pyqgis 2.0
# url: http://gis.stackexchange.com/questions/76364/how-to-get-field-names-in-pyqgis-2-0
dataProvider = layer.dataProvider()
# Return a map of indexes with field names for this layer.
# url: http://qgis.org/api/classQgsVectorDataProvider.html#a53f4e62cb05889ecf9897fc6a015c296
fields = dataProvider.fields()
# get fields name from the layer
field_names = [field.name() for field in fields]
# find index for field 'user-id'
id_user_id_field = imt_tools.find_index_field_by_name(field_names, "user_id")
if id_user_id_field == -1:
qgis_log_tools.logMessageWARNING(
"No \"user_id\"::text field attributes found in layer: " + layer.name())
return -1
# find index for field 'writing_time'
id_w_time_field = imt_tools.find_index_field_by_name(field_names, "w_time")
if id_w_time_field == -1:
qgis_log_tools.logMessageWARNING(
"No \"w_time\"::text attributes found in layer: " + layer.name())
return -1
return [id_user_id_field, id_w_time_field]
def currentIndexChangedTPLCB(self, layer_name):
"""
:param layer_name:
:return:
"""
qgis_log_tools.logMessageINFO("Launch 'currentIndexChangedTPLCB(self, layer_name=" + layer_name + ")' ...")
        # layer_name == "" when we clear the combobox (for example)
if layer_name == "":
return
layer_for_tp = imt_tools.find_layer_in_qgis_legend_interface(self.iface, layer_name)
self.currentLayerForTrackingPosition = layer_for_tp # set the layer for tracking position (plugin)
list_id_fields = qgis_layer_tools.filter_layer_trackingposition_required_fields(layer_for_tp)
self.tp_id_user_id = list_id_fields[0]
self.tp_id_w_time = list_id_fields[1]
dataProvider = layer_for_tp.dataProvider()
# Return a map of indexes with field names for this layer.
# url: http://qgis.org/api/classQgsVectorDataProvider.html#a53f4e62cb05889ecf9897fc6a015c296
fields = dataProvider.fields()
# set the fields
        # reset all fields to None
self.values = [None for i in range(fields.count())]
# set user_id field (suppose constant for a layer (in QGIS session))
self.values[self.tp_id_user_id] = self.tp_user_name
def refreshComboBoxLayers(self):
""" Action when the Combo Box attached to refreshing layers for tracking position is clicked """
#
qgis_log_tools.logMessageINFO("Launch 'refreshComboBoxLayers(...)' ...")
self.dlg.trackingPositionLayerCombo.clear()
idComboBoxIndex = -1
idComboBoxForDefaultSearchLayer = -1
# search a default layer ('camera_position') if no layer was selected before
        # else we search for the same layer (if it is present)
if self.currentLayerForTrackingPosition is None:
defaultSearchLayer = "camera_position"
else:
defaultSearchLayer = self.currentLayerForTrackingPosition.name()
        # dictionary linking combobox indexes to QGIS layer objects
dict_key_comboboxindex_value_layer = {}
#
layers = QgsMapLayerRegistry.instance().mapLayers().values()
for layer in layers:
# filter on layers to add in combobox
if qgis_layer_tools.filter_layer_for_trackingposition(layer):
idComboBoxIndex = self.dlg.trackingPositionLayerCombo.count()
dict_key_comboboxindex_value_layer[idComboBoxIndex] = layer
self.dlg.trackingPositionLayerCombo.addItem(layer.name(), layer)
# default search layer
if layer.name() == defaultSearchLayer:
idComboBoxForDefaultSearchLayer = idComboBoxIndex
#
qgis_log_tools.logMessageINFO(
defaultSearchLayer + " layer found - id in combobox: " +
str(idComboBoxForDefaultSearchLayer)
)
# update GUI
if idComboBoxForDefaultSearchLayer != -1:
self.dlg.trackingPositionLayerCombo.setCurrentIndex(idComboBoxForDefaultSearchLayer)
idComboBoxIndex = idComboBoxForDefaultSearchLayer
if idComboBoxIndex != -1:
try:
self.currentLayerForTrackingPosition = dict_key_comboboxindex_value_layer[idComboBoxIndex]
qgis_log_tools.logMessageINFO("Set the layer to: " + self.currentLayerForTrackingPosition.name())
except:
qgis_log_tools.logMessageINFO("!!! ERROR for selecting layer !!!")
def enabled_autosave(self):
""" Action when the checkbox 'Enable Auto-Save and Refresh' is clicked """
#
qgis_log_tools.logMessageINFO("Launch 'enable_autosave(...)' ...")
resultCommit = False
        # filter on layers
if qgis_layer_tools.filter_layer_for_imt(self.iface.activeLayer()):
self.currentLayer = self.iface.activeLayer()
else:
self.currentLayer = None
#
if self.dlg.enableAutoSave.isChecked():
#
resultCommit = qgis_layer_tools.commitChangesAndRefresh(self.currentLayer, self.iface, QSettings())
#
self.connectSignalForLayerModified(self.currentLayer)
else:
self.disconnectSignalForLayerModified(self.currentLayer)
#
return resultCommit
def stop_threads(self):
if self.qtimer_tracking_position_rtt_to_memory.isActive():
self.qtimer_tracking_position_rtt_to_memory.stop()
if self.qtimer_tracking_position_memory_to_geom.isActive():
self.qtimer_tracking_position_memory_to_geom.stop()
if self.qtimer_tracking_position_geom_to_layer.isActive():
self.qtimer_tracking_position_geom_to_layer.stop()
if self.qtimer_tracking_position_layers_to_commit.isActive():
self.qtimer_tracking_position_layers_to_commit.stop()
def enabled_trackposition(self):
""" Action when the checkbox 'Enable Tracking Position' is clicked """
#
qgis_log_tools.logMessageINFO("Launch 'enable_trackposition(...)' ...")
if self.dlg.enableTrackPosition.isChecked():
#
self.refreshComboBoxLayers()
#
self.connectSignalForExtentsChanged()
else:
self.disconnectSignalForExtentsChanged()
self.stop_threads()
def enableLogging(self):
""" Action when the checkbox 'Enable LOGging' is clicked """
#
qgis_log_tools.setLogging(self.dlg.enableLogging.isChecked())
def enableUseMutexForTP(self):
""" Action when the checkbox 'Use Mutex (for TrackingPosition) [BETA]' is clicked
Beta test for:
        - using a Mutex to protect the commitChange operation in a multi-threaded context (signals strategy)
        - queuing requests from TrackPosition (to amortize the cost and effects on the QGIS GUI)
"""
self.bUseV2Functionnalities = self.dlg.enableUseMutexForTP.isChecked()
if not(self.dlg.enableUseMutexForTP.isChecked() and self.dlg.enableTrackPosition.isChecked()):
self.stop_threads()
def enabled_plugin(self):
""" Action when the checkbox 'Enable SteetGen3 Plugin' is clicked
        Activate/deactivate all options/capabilities of the IMT plugin: AutoSave&Refresh, TrackPosition
"""
qgis_log_tools.logMessageINFO("Launch 'enabled_plugin(...)' ...")
#force the plugin to be in front
self.dlg.raise_()
resultCommit = False
        # filter on the layers to take into account
if qgis_layer_tools.filter_layer_postgis(self.iface.activeLayer()):
self.currentLayer = self.iface.activeLayer()
else:
self.currentLayer = None
if self.dlg.enablePlugin.isChecked():
#
self.dlg.enableAutoSave.setEnabled(True)
self.dlg.enableTrackPosition.setEnabled(True)
self.dlg.enableLogging.setEnabled(True)
self.dlg.thresholdLabel.setEnabled(True)
self.dlg.threshold_extent.setEnabled(True)
QObject.connect(self.dlg.threshold_extent, SIGNAL("editingFinished ()"), self.thresholdChanged)
self.dlg.enableUseMutexForTP.setEnabled(True)
#
self.connectSignalForLayerChanged()
if self.dlg.enableAutoSave.isChecked():
self.connectSignalForLayerModified(self.currentLayer)
resultCommit = qgis_layer_tools.commitChangesAndRefresh(self.currentLayer, self.iface, QSettings())
if self.dlg.enableTrackPosition.isChecked():
self.refreshComboBoxLayers()
self.connectSignalForExtentsChanged()
else:
self.dlg.enableAutoSave.setDisabled(True)
self.dlg.enableTrackPosition.setDisabled(True)
self.dlg.enableLogging.setDisabled(True)
self.dlg.thresholdLabel.setDisabled(True)
self.dlg.threshold_extent.setDisabled(True)
QObject.disconnect(self.dlg.threshold_extent, SIGNAL("returnPressed ()"), self.thresholdChanged)
self.dlg.enableUseMutexForTP.setDisabled(True)
#
self.disconnectSignalForLayerChanged()
if self.dlg.enableAutoSave.isChecked():
self.disconnectSignalForLayerModified(self.currentLayer)
if self.dlg.enableTrackPosition.isChecked():
self.disconnectSignalForExtentsChanged()
self.stop_threads()
return resultCommit
def update_setting(self, _s, _name_in_setting, _checkbox):
""" Update the value store in settings (Qt settings) according to checkbox (Qt) status
:param _s: Qt Settings
:type _s: QSettings
:param _name_in_setting: Name of the setting in QSetting
:type _name_in_setting: QString
:param _checkbox: CheckBox link to this setting
:type _checkbox: QCheckBox
"""
if _checkbox.isChecked():
_s.setValue(self.qsettings_prefix_name + _name_in_setting, "true")
else:
_s.setValue(self.qsettings_prefix_name + _name_in_setting, "false")
def update_settings(self, _s):
""" Update all settings
:param _s: Qt Settings
:type _s: QSettings
"""
dlg = self.dlg
# Update (Qt) settings according to the GUI IMT plugin
self.update_setting(_s, "enabledPlugin", dlg.enablePlugin)
self.update_setting(_s, "enablesAutoSave", dlg.enableAutoSave)
self.update_setting(_s, "enablesTrackPosition", dlg.enableTrackPosition)
def hide_plugin(self):
""" Hide the plugin.
Don't change the state of the plugin
"""
        # @FIXME there is a mistake here, this function is also called before init. Because QSettings is a singleton, variables are initialized before the end of init!
self.update_settings(QSettings())
self.dlg.hide()
def thresholdChanged(self):
"""
QT Line edit changed, we get/interpret the new value (if valid)
Format for threshold scale : 'a'[int]:'b'[int]
        We just use 'b' for the scale => threshold_scale = 'b'
"""
validFormat = True
try:
threshold_string = self.dlg.threshold_extent.text()
self.threshold = int(threshold_string)
except ValueError:
try:
a, b = threshold_string.split(":")
try:
int(a) # just to verify the type of 'a'
self.threshold = int(b) # only use 'b' to change the threshold scale value
except Exception:
validFormat = False # problem with 'a'
except Exception:
validFormat = False # problem with 'b'
# Input format problem!
if validFormat == False:
qgis_log_tools.logMessageWARNING("Invalid input for scale! Scale format input : [int]:[int] or just [int]")
# just for visualisation purpose
self.dlg.threshold_extent.setText("1:" + str(self.threshold))
def update_size_dlg_from_frame(self, dlg, frame, margin_width=60):
"""
:param dlg:
:param frame:
:param margin_width:
:return:
"""
width = frame.contentsSize().width()
height = frame.contentsSize().height()
#
width += margin_width
#
width = max(1024, width)
height = min(768, max(height, width*4/3))
#
dlg.resize(width, height)
#
return width, height
def update_size_dlg_from_tuple(self, dlg, tuple):
dlg.resize(tuple.width, tuple.height)
return tuple.width, tuple.height
def webview_loadFinished(self, ok):
"""
:param ok:
"""
        # safe because we stop the listener of this event when we change the tab
webview = self.webview_current
tuple_webview = self.webview_dict.setdefault(webview, self.webview_default_tuple)
last_state = tuple_webview.state
qgis_log_tools.logMessageINFO("#last_state : " + str(last_state))
if ok:
# we have loaded a HTML page (offline or online)
qgis_log_tools.logMessageINFO("## WebView : OK")
# update the QDiaglog sizes
width, height = self.update_size_dlg_from_frame(
self.dlg,
webview.page().currentFrame(),
self.webview_margin
)
# update the tuple for this webview
self.webview_dict[webview] = self.TP_NAMEDTUPLE_WEBVIEW(
'online',
width, height,
tuple_webview.online_url,
tuple_webview.offline_url
)
#
qgis_log_tools.logMessageINFO("### width : " + str(width) + " - height : " + str(height))
else:
if self.webview_dict[webview].state == 'online':
qgis_log_tools.logMessageINFO("## WebView : FAILED TO LOAD from " + str(self.webview_dict[webview].online_url))#online_url
else :
qgis_log_tools.logMessageINFO("## WebView : FAILED TO LOAD from " + str(self.webview_dict[webview].offline_url))
#
if self.webview_dict[webview].state != 'offline': #regular case we failed, but we are going to try again
self.webview_dict[webview] = self.TP_NAMEDTUPLE_WEBVIEW(
'offline',
tuple_webview.width, tuple_webview.height,
tuple_webview.online_url,
tuple_webview.offline_url
)
# try to load the offline version (still in initial state)
# @FIXME : doesn't load the images in offline mode on XP...
webview.load(QUrl(tuple_webview.offline_url))
            else:  # we already failed last time, stop trying
                qgis_log_tools.logMessageINFO("## WebView : giving up on retrieving html")
def webview_load_page(self, webview, margin=60):
"""
:param webview:
:param margin:
:return:
"""
#
tuple_webview = self.webview_dict.setdefault(webview, self.webview_default_tuple)
self.webview_margin = margin
self.webview_current = webview
# signal : 'loadFinished(bool)'
QObject.connect(webview, SIGNAL("loadFinished (bool)"), self.webview_loadFinished)
# reset/clear the web widget
# url : http://qt-project.org/doc/qt-4.8/qwebview.html#settings
websetting = webview.settings()
websetting.clearMemoryCaches()
globalsettings = websetting.globalSettings()
globalsettings.clearMemoryCaches()
if tuple_webview.state == 'offline': # offline
webview.load(tuple_webview.offline_url)
else: # 'init' or 'online'
webview.load(tuple_webview.online_url)
def QTabWidget_CurrentChanged(self, index):
"""
:param index:
:return:
"""
QObject.disconnect(self.dlg.webView_about, SIGNAL("loadFinished (bool)"), self.webview_loadFinished)
QObject.disconnect(self.dlg.webView_userdoc, SIGNAL("loadFinished (bool)"), self.webview_loadFinished)
if index == 3:
qgis_log_tools.logMessageINFO("## Tab : User Doc")
self.webview_load_page(self.dlg.webView_userdoc, self.webview_margin)
elif index == 4:
qgis_log_tools.logMessageINFO("## Tab : About")
self.webview_load_page(self.dlg.webView_about, self.webview_margin)
else:
self.dict_tabs_size[index] = self.dict_tabs_size.setdefault(index, self.dlg.minimumSize())
self.dlg.resize(self.dict_tabs_size[index])
# TODO: optimize update_track_position because it's a (critical) real-time method !
def update_track_position(self, bWithProjectionInCRSLayer=True, bUseEmptyFields=False):
""" Perform the update tracking position (in real-time)
Save the current Qgis Extent (+metadatas) into a QGIS vector layer (compatible for tracking position).
The QGIS Vector Layer need at least 2 attributes:
- user_id: text
- w_time: text
:param bWithProjectionInCRSLayer: Option [default=True].
If True, project the QGIS MapCanvas extent (QGIS World CRS) into Layer CRS (CRS=Coordinates Reference System)
:type bWithProjectionInCRSLayer: bool
:param bUseEmptyFields: Option [default=False]
If True, don't set fields (user_id, w_time)
            If False, use an auto-generated id for the user (user name from OS + LAN IP) and the current date time (from the OS timestamp, in QDateTime string format)
:type bUseEmptyFields: bool
"""
if self.currentLayerForTrackingPosition is None:
return -1
mapCanvas = self.iface.mapCanvas()
mapcanvas_extent = mapCanvas.extent()
layer_for_polygon_extent = self.currentLayerForTrackingPosition
# # filter on extent size
# try:
# threshold = int(self.dlg.threshold_extent.text())
# except Exception:
# qgis_log_tools.logMessageWARNING("Threshold can only be a number")
# return -1
# if max(mapcanvas_extent.width(), mapcanvas_extent.height()) > threshold:
if mapCanvas.scale() > self.threshold:
qgis_log_tools.logMessageWARNING("MapCanvas extent size exceed the Threshold size for tracking")
qgis_log_tools.logMessageWARNING(
"-> MapCanvas extent size= " + str(max(mapcanvas_extent.width(), mapcanvas_extent.height())) +
"\tThreshold size= " + str(self.threshold))
return -2
# get the list points from the current extent (from QGIS MapCanvas)
list_points_from_mapcanvas = imt_tools.construct_listpoints_from_extent(mapcanvas_extent)
## NEED TO OPTIMIZE ##
if bWithProjectionInCRSLayer:
# url: http://qgis.org/api/classQgsMapCanvas.html#af0ffae7b5e5ec8b29764773fa6a74d58
extent_src_crs = mapCanvas.mapSettings().destinationCrs()
# url: http://qgis.org/api/classQgsMapLayer.html#a40b79e2d6043f8ec316a28cb17febd6c
extent_dst_crs = layer_for_polygon_extent.crs()
# url: http://docs.qgis.org/testing/en/docs/pyqgis_developer_cookbook/crs.html
xform = QgsCoordinateTransform(extent_src_crs, extent_dst_crs)
#
list_points = [xform.transform(point) for point in list_points_from_mapcanvas]
else:
list_points = list_points_from_mapcanvas
## NEED TO OPTIMIZE ##
# list of lists of points
gPolygon = QgsGeometry.fromPolygon([list_points])
fet = QgsFeature()
fet.setGeometry(gPolygon)
if bUseEmptyFields:
pass
else:
# update the time stamp attribute
self.values[self.tp_id_w_time] = imt_tools.get_timestamp_from_qt_string_format()
fet.setAttributes(self.values)
# How can I programatically create and add features to a memory layer in QGIS 1.9?
# url: http://gis.stackexchange.com/questions/60473/how-can-i-programatically-create-and-add-features-to-a-memory-layer-in-qgis-1-9
# write the layer and send request to DB
layer_for_polygon_extent.startEditing()
layer_for_polygon_extent.addFeatures([fet], False) # bool_makeSelected=False
#
resultCommit = layer_for_polygon_extent.commitChanges()
#
if resultCommit:
qgis_log_tools.logMessageINFO("Location saved in layer: " + layer_for_polygon_extent.name())
else:
qgis_log_tools.logMessageCRITICAL(
"saving position failed : are you sure the selected tracking layer: " + layer_for_polygon_extent.name() +
"has at least 2 attributes : \"user_id\"::text and \"w_time\"::text")
commitErrorString = layer_for_polygon_extent.commitErrors()[2]
commitErrorStringShort = commitErrorString[commitErrorString.rfind(":") + 2:len(
commitErrorString)] # +2 to skip ': ' prefix of commitError msg
self.iface.messageBar().pushMessage("IMT. ERROR : " + "\"" + commitErrorStringShort + "\"",
"",
QgsMessageBar.CRITICAL, 0)
#
return resultCommit
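    # Illustration only (not called by the plugin): a layer compatible with the
    # tracking-position methods above needs "user_id" and "w_time" text
    # attributes. Assuming the QGIS 2.x memory provider, such a layer could be
    # created as a sketch like this (layer name and CRS are placeholders):
    #
    #   layer = QgsVectorLayer(
    #       "Polygon?crs=EPSG:4326&field=user_id:string&field=w_time:string",
    #       "camera_position", "memory")
    #   QgsMapLayerRegistry.instance().addMapLayer(layer)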
def update_track_position_with_qtimers(self, bWithProjectionInCRSLayer=True, bUseEmptyFields=False):
"""
Note: it's not a Thread/QTimer
:param bWithProjectionInCRSLayer:
:param bUseEmptyFields:
:return:
"""
bIsTimeToUpdate = self.tp_timers.is_time_to_update("update_track_position_with_qtimers", "tp_threshold_time_for_realtime_tracking_position")
if bIsTimeToUpdate:
# Do we have a current layer activate for tracking position ?
if self.currentLayerForTrackingPosition is None:
# if not, no need to go further
return -1
mapCanvas = self.iface.mapCanvas()
mapcanvas_extent = mapCanvas.extent()
            # Add a filter to avoid saving the same extent twice (useful given our 'dummy' approach to refreshing the map)
if imt_tools.extent_equal(self.tp_last_extent_saved, mapcanvas_extent, 0.01): # 10 mm
return -3
# Filter on extent map scale (size)
# We use a threshold scale (user input in the GUI)
if mapCanvas.scale() > self.threshold:
qgis_log_tools.logMessageWARNING("MapCanvas extent scale exceed the Threshold scale for tracking")
qgis_log_tools.logMessageWARNING(
"-> MapCanvas scale= " + str(mapCanvas.scale()) +
"\tThreshold scale= " + str(self.threshold))
return -2
layer_for_itp = self.currentLayerForTrackingPosition
# Build the tuple contains:
# - layer used for tracking position
# - list of points extract from the current extent for QGIS Map Canvas
# - acquisition time for this track
rt_ntuple = self.TP_NAMEDTUPLE_LET(
layer_for_itp,
imt_tools.construct_listpoints_from_extent(mapcanvas_extent),
imt_tools.get_timestamp()
)
self.tp_last_extent_saved = mapcanvas_extent
self.tp_timers.update("update_track_position_with_qtimers")
            # This queue is not protected (multi-threaded context),
            # but that is fine in our case:
            # the queue is write-append only here!
self.tp_queue_rt_ntuples_let.put(rt_ntuple)
interval = self.tp_timers.get_delay("tp_threshold_time_for_tp_to_mem")
self.qtimer_tracking_position_rtt_to_memory.start(interval)
# update timer for still moving here !
self.tp_timers.update("still moving")
return 1
def tracking_position_qtimer_rttp_to_memory(self):
""" Action perform when the QTimer for Tracking Position is time out
Enqueue requests from Tracking Position to amortize the cost&effect on QGIS GUI
This pass transfer events inputs map moving into memory (QGIS -> Python)
"""
qgis_log_tools.logMessageINFO("~~ CALL : tracking_position_qtimer_rttp_to_memory ~~")
# qgis_log_tools.logMessage("self.tp_timers.delta_with_current_time('tp_time_last_rttp_to_mem'): "
# + str(self.tp_timers.delta_with_current_time("tp_time_last_rttp_to_mem")))
# bIsTimeToUpdate = self.tp_timers.is_time_to_update("tp_time_last_rttp_to_mem",
# "tp_threshold_time_for_tp_to_mem")
size_tp_queue = self.tp_queue_rt_ntuples_let.qsize()
        # this queue is not protected (multi-threaded context)
        # but that is fine in our case
while not self.tp_queue_rt_ntuples_let.empty():
# queue in read-write-delete/pop here
tp_tuple = self.tp_queue_rt_ntuples_let.get()
self.tp_queue_rt_ntuples_let.task_done()
# url: http://stackoverflow.com/questions/20585920/how-to-add-multiple-values-to-a-dictionary-key-in-python
self.tp_dict_key_l_values_et.setdefault(tp_tuple.layer, []).append(
self.TP_NAMEDTUPLE_ET(tp_tuple.extent, tp_tuple.w_time)
)
if size_tp_queue != 0:
# update timer
# self.tp_timers.update("tp_time_last_rttp_to_mem")
#
qgis_log_tools.logMessageINFO("** Pack " + str(size_tp_queue) + " tuples for 1 call -> mem")
#####################
# Process Management
#####################
self.qtimer_tracking_position_rtt_to_memory.stop()
#
interval = self.tp_timers.get_delay("tp_threshold_time_for_construct_geom")
self.qtimer_tracking_position_memory_to_geom.start(interval)
#####################
def tracking_position_qtimer_memory_to_geom(self):
""" Action perform when the QTimer for Tracking Position is time out
Enqueue requests from Tracking Position to amortize the cost&effect on QGIS GUI
This pass convert tracking extents datas (MEMory side) to QGIS GEOMetries (MEMory side)
In this pass we project the entries points in world (QGIS) CRS into CRS of the layer target (for tracking position)
"""
qgis_log_tools.logMessageINFO("~~ CALL : tracking_position_qtimer_memory_to_geom ~~")
# qgis_log_tools.logMessage("self.tp_timers.delta_with_current_time('tp_time_last_rttp_to_mem'): "
# + str(self.tp_timers.delta_with_current_time("tp_time_last_rttp_to_mem")))
# bIsTimeToUpdate = self.tp_timers.is_time_to_update("tp_time_last_rttp_to_mem",
# "tp_threshold_time_for_construct_geom")
mapCanvas = self.iface.mapCanvas()
# url: http://qgis.org/api/classQgsMapCanvas.html#af0ffae7b5e5ec8b29764773fa6a74d58
extent_src_crs = mapCanvas.mapSettings().destinationCrs()
append_in_dic = False
for layer in self.tp_dict_key_l_values_et.keys():
layer_to_commit = layer
# url: http://qgis.org/api/classQgsMapLayer.html#a40b79e2d6043f8ec316a28cb17febd6c
extent_dst_crs = layer_to_commit.crs()
# url: http://docs.qgis.org/testing/en/docs/pyqgis_developer_cookbook/crs.html
xform = QgsCoordinateTransform(extent_src_crs, extent_dst_crs)
tp_list_fets = []
# pop key from tracking position dictionary
list_ntuples = self.tp_dict_key_l_values_et.pop(layer)
append_at_least_1_fet = False
for tp_namedtuple in list_ntuples:
mapcanvas_extent = tp_namedtuple.extent
w_time = tp_namedtuple.w_time
# get the list points from the current extent (from QGIS MapCanvas)
list_points_from_mapcanvas = mapcanvas_extent
                # TODO: add an option for this feature (project points into the destination layer CRS) in the GUI
bWithProjectionInCRSLayer = True
if bWithProjectionInCRSLayer:
#
list_points = [xform.transform(point) for point in list_points_from_mapcanvas]
else:
list_points = list_points_from_mapcanvas
# list of lists of points
gPolygon = QgsGeometry.fromPolygon([list_points])
fet = QgsFeature()
fet.setGeometry(gPolygon)
# update the time stamp attribute
self.values[self.tp_id_w_time] = imt_tools.convert_timestamp_to_qt_string_format(w_time)
fet.setAttributes(self.values)
tp_list_fets.append(fet)
append_at_least_1_fet = True
if append_at_least_1_fet:
self.tp_dict_key_l_values_listfeatures.setdefault(layer, []).append(tp_list_fets)
append_in_dic = True
qgis_log_tools.logMessageINFO(
"-- Pack " + str(len(tp_list_fets)) + " features in layer: " + layer.name())
if append_in_dic:
# update timer
# self.tp_timers.update("tp_time_last_construct_geom")
#####################
# Process Management
#####################
#
self.qtimer_tracking_position_memory_to_geom.stop()
#
interval = self.tp_timers.get_delay("tp_threshold_time_for_sending_geom_to_layer")
interval += CONVERT_S_TO_MS(max(0.0, (self.tp_timers.get_delay("delay_time_still_moving")-self.tp_timers.delta_with_current_time("still moving"))))
self.qtimer_tracking_position_geom_to_layer.start(interval)
#####################
# qgis_log_tools.logMessage("=> still moving = " + str(self.tp_timers.get_delay("delay_time_still_moving")-self.tp_timers.delta_with_current_time("still moving")))
# qgis_log_tools.logMessage("=> still moving = " + str(max(0.0, self.tp_timers.get_delay("delay_time_still_moving")-self.tp_timers.delta_with_current_time("still moving"))))
# qgis_log_tools.logMessage("delay= " + str(delay))
def tracking_position_qtimer_geom_to_layer(self):
""" Action perform when the QTimer for Tracking Position is time out
Enqueue requests from Tracking Position to amortize the cost&effect on QGIS GUI
In this pass we transfer if timeout for delay is done and we 'not moving' on QGIS Map
QGIS GEOmetries [MEM] into target Layer [MEM]
This operation send/render geometries into the Layer (we see a rectangle representation of our extent on the screen)
"""
qgis_log_tools.logMessageINFO("~~ CALL : tracking_position_qtimer_geom_to_layer ~~")
bNotMovingOnQGISMap = self.tp_timers.is_time_to_update("still moving", "delay_time_still_moving")
# qgis_log_tools.logMessage("self.tp_timers.delta_with_current_time('still moving'): " + str(self.tp_timers.delta_with_current_time("still moving")))
# qgis_log_tools.logMessage("bNotMovingOnQGISMap: " + str(bNotMovingOnQGISMap))
# qgis_log_tools.logMessage("delay_time_still_moving: " + str(self.tp_timers.get_delay("delay_time_still_moving")))
# if bIsTimeToUpdate and bNotMovingOnQGISMap:
if bNotMovingOnQGISMap:
append_in_dict_one_time = False
for layer in self.tp_dict_key_l_values_listfeatures.keys():
# from the dict we retrieve a list of list
tp_list_of_list_fets = self.tp_dict_key_l_values_listfeatures.pop(layer)
# How can I programatically create and add features to a memory layer in QGIS 1.9?
# url: http://gis.stackexchange.com/questions/60473/how-can-i-programatically-create-and-add-features-to-a-memory-layer-in-qgis-1-9
# write the layer and send request to DB
layer.startEditing()
for tp_list_fets in tp_list_of_list_fets:
layer.addFeatures(tp_list_fets, False) # bool_makeSelected=False
self.tp_dict_layers_to_commit[layer] = 1
append_in_dict_one_time = True
qgis_log_tools.logMessageINFO(
"++ Pack requests => " + str(len(tp_list_of_list_fets)) + " extents for layer: " + layer.name())
if append_in_dict_one_time:
# update timer
# self.tp_timers.update("tp_time_last_send_geom_to_layer")
#####################
# Process Management
#####################
self.qtimer_tracking_position_geom_to_layer.stop()
#
interval = self.tp_timers.get_delay("tp_threshold_time_for_sending_layer_to_dp")
interval += CONVERT_S_TO_MS(max(0.0, self.tp_timers.get_delay("delay_time_still_moving")-self.tp_timers.delta_with_current_time("still moving")))
self.qtimer_tracking_position_layers_to_commit.start(interval)
#####################
# qgis_log_tools.logMessageINFO("tp_threshold_time_for_sending_layer_to_dp= " + str(self.tp_timers.get_delay("tp_threshold_time_for_sending_layer_to_dp")))
else:
self.qtimer_tracking_position_geom_to_layer.setInterval(self.tp_timers.get_delay("delay_time_still_moving")*1000)
def tracking_position_qtimer_layers_to_commit(self):
"""Action perform when the QTimer for Tracking Position is time out
Enqueue requests from Tracking Position to amortize the cost&effect on QGIS GUI
In this pass, we commit QGIS Layer into Data Provider (PostGIS DataBase, or ShapeFile, or anything (local or distant)
MEM -> DP
"""
qgis_log_tools.logMessageINFO("~~ CALL : tracking_position_qtimer_layers_to_commit ~~")
bNotMovingOnQGISMap = self.tp_timers.is_time_to_update("still moving", "delay_time_still_moving")
# qgis_log_tools.logMessage("self.tp_timers.delta_with_current_time(''tp_time_last_send_geom_to_layer''):"+str(self.tp_timers.delta_with_current_time("tp_time_last_send_geom_to_layer")))
# qgis_log_tools.logMessage("bNotMovingOnQGISMap: " + str(bNotMovingOnQGISMap))
if bNotMovingOnQGISMap:
layers = self.tp_dict_layers_to_commit.keys()
# clear dict
self.tp_dict_layers_to_commit.clear()
for layer_to_commit in layers:
#
try:
# update timer
# self.tp_timers.update("tracking_position_qtimer_layers_to_commit")
resultCommit = layer_to_commit.commitChanges()
if resultCommit:
qgis_log_tools.logMessageINFO("* Commit change layer:" + layer_to_commit.name + " [OK]")
except:
# TODO : deal with errors/exceptions
pass
#####################
# Process Management
#####################
# last link of processes chain
self.qtimer_tracking_position_layers_to_commit.stop()
#####################
else:
#####################
# Process Management
#####################
interval = CONVERT_S_TO_MS(self.tp_timers.get_delay("delay_time_still_moving"))
self.qtimer_tracking_position_layers_to_commit.setInterval(interval)
#####################
def tracking_position_log_threads_infos(self):
"""
DEBUG INFORMATIONS
"""
qgis_log_tools.logMessageINFO(
"self.tp_queue_rt_ntuples_let.qsize(): " + str(self.tp_queue_rt_ntuples_let.qsize()))
qgis_log_tools.logMessageINFO(
"- self.tp_dict_key_l_values_et.qsize(): " + str(len(self.tp_dict_key_l_values_et)))
qgis_log_tools.logMessageINFO("- - self.tp_list_fets: " + str(len(self.tp_list_fets)))
qgis_log_tools.logMessageINFO(
"- - - self.tp_dict_key_l_values_listfeatures: " + str(len(self.tp_dict_key_l_values_listfeatures)))
qgis_log_tools.logMessageINFO(
"- - - - self.tp_list_layers_to_commit: " + str(len(self.tp_dict_layers_to_commit)))
| gpl-3.0 | 6,448,873,579,100,184,000 | 42.5639 | 194 | 0.61615 | false |
bccp/nbodykit | nbodykit/tests/test_binned_stat.py | 2 | 9104 | from runtests.mpi import MPITest
from nbodykit import setup_logging
from nbodykit.binned_statistic import BinnedStatistic
import pytest
import tempfile
import numpy.testing as testing
import numpy
import os
data_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'data')
setup_logging("debug")
@MPITest([1])
def test_to_json(comm):
# load from JSON
ds1 = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_1d.json'))
# to JSON
with tempfile.NamedTemporaryFile(delete=False) as ff:
ds1.to_json(ff.name)
ds2 = BinnedStatistic.from_json(ff.name)
# same data?
for name in ds1:
testing.assert_almost_equal(ds1[name], ds2[name])
# cleanup
os.remove(ff.name)
@MPITest([1])
def test_1d_load(comm):
# load plaintext format
with pytest.warns(FutureWarning):
ds1 = BinnedStatistic.from_plaintext(['k'], os.path.join(data_dir, 'dataset_1d_deprecated.dat'))
# wrong dimensions
with pytest.raises(ValueError):
ds1 = BinnedStatistic.from_plaintext(['k', 'mu'], os.path.join(data_dir, 'dataset_1d_deprecated.dat'))
# load from JSON
ds2 = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_1d.json'))
# same data?
for name in ds1:
testing.assert_almost_equal(ds1[name], ds2[name])
@MPITest([1])
def test_2d_load(comm):
# load plaintext format
with pytest.warns(FutureWarning):
ds1 = BinnedStatistic.from_plaintext(['k', 'mu'], os.path.join(data_dir, 'dataset_2d_deprecated.dat'))
# load from JSON
ds2 = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# same data?
for name in ds1:
testing.assert_almost_equal(ds1[name], ds2[name])
@MPITest([1])
def test_str(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# list all variable names
s = str(dataset)
# now just list total number of variables
dataset['test1'] = numpy.ones(dataset.shape)
dataset['test2'] = numpy.ones(dataset.shape)
s = str(dataset)
# this is the same as str
r = repr(dataset)
@MPITest([1])
def test_getitem(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# invalid key
with pytest.raises(KeyError):
bad = dataset['error']
# slice columns
sliced = dataset[['k', 'mu', 'power']]
sliced = dataset[('k', 'mu', 'power')]
# invalid slice
with pytest.raises(KeyError):
        bad = dataset[['k', 'mu', 'error']]
# too many dims in slice
with pytest.raises(IndexError):
bad = dataset[0,0,0]
# cannot access single element of 2D power
with pytest.raises(IndexError):
bad = dataset[0,0]
@MPITest([1])
def test_array_slice(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# get the first mu column
sliced = dataset[:,0]
assert sliced.shape[0] == dataset.shape[0]
assert len(sliced.shape) == 1
assert sliced.dims == ['k']
# get the first mu column but keep dimension
sliced = dataset[:,[0]]
assert sliced.shape[0] == dataset.shape[0]
assert sliced.shape[1] == 1
assert sliced.dims == ['k', 'mu']
@MPITest([1])
def test_list_array_slice(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# get the first and last mu column
sliced = dataset[:,[0, -1]]
assert len(sliced.shape) == 2
assert sliced.dims == ['k', 'mu']
# make sure we grabbed the right data
for var in dataset:
testing.assert_array_equal(dataset[var][:,[0,-1]], sliced[var])
@MPITest([1])
def test_variable_set(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
modes = numpy.ones(dataset.shape)
# add new variable
dataset['TEST'] = modes
assert 'TEST' in dataset
# override existing variable
dataset['modes'] = modes
assert numpy.all(dataset['modes'] == 1.0)
# needs right shape
with pytest.raises(ValueError):
dataset['TEST'] = 10.
@MPITest([1])
def test_copy(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
copy = dataset.copy()
for var in dataset:
testing.assert_array_equal(dataset[var], copy[var])
@MPITest([1])
def test_rename_variable(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
test = numpy.zeros(dataset.shape)
dataset['test'] = test
dataset.rename_variable('test', 'renamed_test')
assert 'renamed_test' in dataset
assert 'test' not in dataset
@MPITest([1])
def test_sel(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# no exact match fails
with pytest.raises(IndexError):
sliced = dataset.sel(k=0.1)
# this should be squeezed
sliced = dataset.sel(k=0.1, method='nearest')
assert len(sliced.dims) == 1
# this is not squeezed
sliced = dataset.sel(k=[0.1], method='nearest')
assert sliced.shape[0] == 1
    # this returns an empty k with arbitrary edges.
sliced = dataset.sel(k=[], method='nearest')
assert sliced.shape[0] == 0
# slice in a specific k-range
sliced = dataset.sel(k=slice(0.02, 0.15), mu=[0.5], method='nearest')
assert sliced.shape[1] == 1
assert numpy.alltrue((sliced['k'] >= 0.02)&(sliced['k'] <= 0.15))
@MPITest([1])
def test_take(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
sliced = dataset.take(k=[8])
assert sliced.shape[0] == 1
assert len(sliced.dims) == 2
sliced = dataset.take(k=[])
assert sliced.shape[0] == 0
assert len(sliced.dims) == 2
dataset.take(k=dataset.coords['k'] < 0.3)
assert len(sliced.dims) == 2
dataset.take(dataset['modes'] > 0)
assert len(sliced.dims) == 2
dataset.take(dataset['k'] < 0.3)
assert len(sliced.dims) == 2
@MPITest([1])
def test_squeeze(comm):
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# need to specify which dimension to squeeze
with pytest.raises(ValueError):
squeezed = dataset.squeeze()
with pytest.raises(ValueError):
squeezed = dataset[[0],[0]].squeeze()
sliced = dataset[:,[2]]
with pytest.raises(ValueError):
squeezed = sliced.squeeze('k')
squeezed = sliced.squeeze('mu')
assert len(squeezed.dims) == 1
assert squeezed.shape[0] == sliced.shape[0]
@MPITest([1])
def test_average(comm):
import warnings
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
# unweighted
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
avg = dataset.average('mu')
for var in dataset.variables:
if var in dataset._fields_to_sum:
x = numpy.nansum(dataset[var], axis=-1)
else:
x = numpy.nanmean(dataset[var], axis=-1)
testing.assert_allclose(x, avg[var])
# weighted
weights = numpy.random.random(dataset.shape)
dataset['weights'] = weights
avg = dataset.average('mu', weights='weights')
for var in dataset:
if var in dataset._fields_to_sum:
x = numpy.nansum(dataset[var], axis=-1)
else:
x = numpy.nansum(dataset[var]*dataset['weights'], axis=-1)
x /= dataset['weights'].sum(axis=-1)
testing.assert_allclose(x, avg[var])
@MPITest([1])
def test_reindex(comm):
import warnings
dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))
with pytest.raises(ValueError):
new, spacing = dataset.reindex('k', 0.005, force=True, return_spacing=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
weights = numpy.random.random(dataset.shape)
dataset['weights'] = weights
new, spacing = dataset.reindex('k', 0.02, weights='weights', force=True, return_spacing=True)
diff = numpy.diff(new.coords['k'])
assert numpy.alltrue(diff > numpy.diff(dataset.coords['k'])[0])
with pytest.raises(ValueError):
new = dataset.reindex('mu', 0.4, force=False)
new = dataset.reindex('mu', 0.4, force=True)
@MPITest([1])
def test_subclass_copy_sel(comm):
# this test asserts the sel returns instance of subclass.
# and the copy method can change the class.
class A(BinnedStatistic):
def mymethod(self):
return self.copy(cls=BinnedStatistic)
# load from JSON
dataset = A.from_json(os.path.join(data_dir, 'dataset_2d.json'))
dataset.mymethod()
# no exact match fails
with pytest.raises(IndexError):
sliced = dataset.sel(k=0.1)
# this should be squeezed
sliced = dataset.sel(k=0.1, method='nearest')
assert len(sliced.dims) == 1
assert isinstance(sliced, A)
assert isinstance(sliced.mymethod(), BinnedStatistic)
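# Illustrative (non-test) sketch of the BinnedStatistic calls exercised above;
# it is not collected by the test runner and assumes the same 2d dataset file.
def _example_binned_statistic_usage():
    dataset = BinnedStatistic.from_json(os.path.join(data_dir, 'dataset_2d.json'))

    # select a k-range at the mu bin nearest to 0.5
    sliced = dataset.sel(k=slice(0.02, 0.15), mu=[0.5], method='nearest')

    # average over mu, weighting by the number of modes
    avg = dataset.average('mu', weights='modes')

    # rebin k onto a coarser spacing
    coarse = dataset.reindex('k', 0.02, weights='modes', force=True)

    return sliced, avg, coarse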
| gpl-3.0 | 4,051,709,132,764,577,000 | 27.098765 | 114 | 0.636424 | false |
RichardLeeK/MachineLearning | MachineLearning/Img3DProcess/main.py | 1 | 2129 | # Basic OBJ file viewer. needs objloader from:
# http://www.pygame.org/wiki/OBJFileLoader
# LMB + move: rotate
# RMB + move: pan
# Scroll wheel: zoom in/out
import sys, pygame
from pygame.locals import *
from pygame.constants import *
from OpenGL.GL import *
from OpenGL.GLU import *
# IMPORT OBJECT LOADER
from objloader import *
pygame.init()
viewport = (800,600)
hx = viewport[0]/2
hy = viewport[1]/2
srf = pygame.display.set_mode(viewport, OPENGL | DOUBLEBUF)
glLightfv(GL_LIGHT0, GL_POSITION, (-40, 200, 100, 0.0))
glLightfv(GL_LIGHT0, GL_AMBIENT, (0.2, 0.2, 0.2, 1.0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.5, 0.5, 0.5, 1.0))
glEnable(GL_LIGHT0)
glEnable(GL_LIGHTING)
glEnable(GL_COLOR_MATERIAL)
glEnable(GL_DEPTH_TEST)
glShadeModel(GL_SMOOTH) # most obj files expect to be smooth-shaded
# LOAD OBJECT AFTER PYGAME INIT
obj = OBJ('testData.obj', swapyz=True)
clock = pygame.time.Clock()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
width, height = viewport
gluPerspective(90.0, width/float(height), 1, 100.0)
glEnable(GL_DEPTH_TEST)
glMatrixMode(GL_MODELVIEW)
rx, ry = (0,0)
tx, ty = (0,0)
zpos = 5
rotate = move = False
while 1:
clock.tick(30)
for e in pygame.event.get():
if e.type == QUIT:
sys.exit()
elif e.type == KEYDOWN and e.key == K_ESCAPE:
sys.exit()
elif e.type == MOUSEBUTTONDOWN:
if e.button == 4: zpos = max(1, zpos-1)
elif e.button == 5: zpos += 1
elif e.button == 1: rotate = True
elif e.button == 3: move = True
elif e.type == MOUSEBUTTONUP:
if e.button == 1: rotate = False
elif e.button == 3: move = False
elif e.type == MOUSEMOTION:
i, j = e.rel
if rotate:
rx += i
ry += j
if move:
tx += i
ty -= j
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
# RENDER OBJECT
glTranslate(tx/20., ty/20., - zpos)
glRotate(ry, 1, 0, 0)
glRotate(rx, 0, 1, 0)
glCallList(obj.gl_list)
pygame.display.flip() | mit | 1,437,956,592,720,366,300 | 25.962025 | 77 | 0.602161 | false |
dialogtekgeek/DSTC6-End-to-End-Conversation-Modeling | ChatbotBaseline/tools/seq2seq_model.py | 1 | 4813 | # -*- coding: utf-8 -*-
"""Sequence-to-sequence model module
Copyright (c) 2017 Takaaki Hori ([email protected])
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
import six
import chainer
import chainer.functions as F
from chainer import cuda
import numpy as np
class Sequence2SequenceModel(chainer.Chain):
def __init__(self, encoder, decoder):
""" Define model structure
Args:
encoder (~chainer.Chain): encoder network
decoder (~chainer.Chain): decoder network
"""
super(Sequence2SequenceModel, self).__init__(
encoder = encoder,
decoder = decoder
)
def loss(self,es,x,y,t):
""" Forward propagation and loss calculation
Args:
es (pair of ~chainer.Variable): encoder state
x (list of ~chainer.Variable): list of input sequences
y (list of ~chainer.Variable): list of output sequences
t (list of ~chainer.Variable): list of target sequences
if t is None, it returns only states
Return:
es (pair of ~chainer.Variable(s)): encoder state
ds (pair of ~chainer.Variable(s)): decoder state
loss (~chainer.Variable) : cross-entropy loss
"""
es,ey = self.encoder(es,x)
ds,dy = self.decoder(es,y)
if t is not None:
loss = F.softmax_cross_entropy(dy,t)
# avoid NaN gradients (See: https://github.com/pfnet/chainer/issues/2505)
if chainer.config.train:
loss += F.sum(F.concat(ey, axis=0)) * 0
return es, ds, loss
else: # if target is None, it only returns states
return es, ds
def generate(self, es, x, sos, eos, unk=0, maxlen=100, beam=5, penalty=1.0, nbest=1):
""" Generate sequence using beam search
Args:
es (pair of ~chainer.Variable(s)): encoder state
x (list of ~chainer.Variable): list of input sequences
sos (int): id number of start-of-sentence label
eos (int): id number of end-of-sentence label
unk (int): id number of unknown-word label
            maxlen (int): maximum length of a generated sequence
            beam (int): beam width used in the beam search
penalty (float): penalty added to log probabilities
of each output label.
nbest (int): number of n-best hypotheses to be output
Return:
list of tuples (hyp, score): n-best hypothesis list
- hyp (list): generated word Id sequence
- score (float): hypothesis score
pair of ~chainer.Variable(s)): decoder state of best hypothesis
"""
# encoder
es,ey = self.encoder(es, [x])
# beam search
ds = self.decoder.initialize(es, ey, sos)
hyplist = [([], 0., ds)]
best_state = None
comp_hyplist = []
for l in six.moves.range(maxlen):
new_hyplist = []
argmin = 0
for out,lp,st in hyplist:
logp = self.decoder.predict(st)
lp_vec = cuda.to_cpu(logp.data[0]) + lp
if l > 0:
new_lp = lp_vec[eos] + penalty * (len(out)+1)
new_st = self.decoder.update(st,eos)
comp_hyplist.append((out, new_lp))
if best_state is None or best_state[0] < new_lp:
best_state = (new_lp, new_st)
for o in np.argsort(lp_vec)[::-1]:
if o == unk or o == eos:# exclude <unk> and <eos>
continue
new_lp = lp_vec[o]
if len(new_hyplist) == beam:
if new_hyplist[argmin][1] < new_lp:
new_st = self.decoder.update(st, o)
new_hyplist[argmin] = (out+[o], new_lp, new_st)
argmin = min(enumerate(new_hyplist), key=lambda h:h[1][1])[0]
else:
break
else:
new_st = self.decoder.update(st, o)
new_hyplist.append((out+[o], new_lp, new_st))
if len(new_hyplist) == beam:
argmin = min(enumerate(new_hyplist), key=lambda h:h[1][1])[0]
hyplist = new_hyplist
if len(comp_hyplist) > 0:
maxhyps = sorted(comp_hyplist, key=lambda h:-h[1])[:nbest]
return maxhyps, best_state[1]
else:
return [([],0)],None
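# Illustration only (not part of the module): a sketch of how loss() and
# generate() are typically driven. The encoder state handling (es=None) and the
# vocabulary ids below are assumptions, not the project's actual configuration.
def _example_usage(model, xs, ys, ts, sos_id, eos_id):
    """model is a Sequence2SequenceModel; xs/ys/ts are lists of Variables."""
    # training step: forward pass returns updated states and the loss
    es, ds, loss = model.loss(None, xs, ys, ts)

    # decoding: beam search from a single input sequence
    hyps, best_state = model.generate(None, xs[0], sos_id, eos_id,
                                      maxlen=50, beam=3, nbest=1)
    return loss, hyps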
| mit | 1,694,128,833,325,667,000 | 39.445378 | 90 | 0.503636 | false |
vrenaville/report-print-send | base_report_to_printer/report.py | 1 | 4026 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, exceptions, _, api
class Report(models.Model):
_inherit = 'report'
@api.v7
def print_document(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Print a document, do not return the document file """
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
local_context = context.copy()
local_context['must_skip_send_to_printer'] = True
document = self.get_pdf(cr, uid, ids, report_name,
html=html, data=data, context=local_context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
if not printer:
raise exceptions.Warning(
_('No printer configured to print this report.')
)
return printer.print_document(report, document, report.report_type)
@api.v8
def print_document(self, records, report_name, html=None, data=None):
return self._model.print_document(self._cr, self._uid,
records.ids, report_name,
html=html, data=data, context=self._context)
def _can_print_report(self, cr, uid, ids, behaviour, printer, document,
context=None):
"""Predicate that decide if report can be sent to printer
If you want to prevent `get_pdf` to send report you can set
the `must_skip_send_to_printer` key to True in the context
"""
if context is not None and context.get('must_skip_send_to_printer'):
return False
if behaviour['action'] == 'server' and printer and document:
return True
return False
@api.v7
def get_pdf(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Generate a PDF and returns it.
If the action configured on the report is server, it prints the
generated document as well.
"""
document = super(Report, self).get_pdf(cr, uid, ids, report_name,
html=html, data=data,
context=context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
can_print_report = self._can_print_report(cr, uid, ids,
behaviour, printer, document,
context=context)
if can_print_report:
printer.print_document(report, document, report.report_type)
return document
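    # Illustration only: callers that want the PDF without triggering the
    # server-side printing handled by _can_print_report() can set the context
    # key it checks, e.g. (hypothetical report name):
    #
    #   report_obj.get_pdf(cr, uid, ids, 'my_module.my_report',
    #                      context={'must_skip_send_to_printer': True})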
@api.v8
def get_pdf(self, records, report_name, html=None, data=None):
return self._model.get_pdf(self._cr, self._uid,
records.ids, report_name,
html=html, data=data, context=self._context) | agpl-3.0 | -2,749,583,403,344,640,500 | 43.252747 | 79 | 0.561351 | false |
AjabWorld/ajabsacco | ajabsacco/core/models/loans.py | 1 | 15611 | from decimal import Decimal as D
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
from .querysets.loans import *
from .abstract import ExtraField, LedgerBalance
from ajabsacco.core.codes.loans import *
TRANSACTION_TYPES = (
(TRANSACTION_TYPE_LOAN_DISBURSAL, "Loan Disbursal"),
(TRANSACTION_TYPE_INTEREST_APPLY, "Apply Interest on Account"),
(TRANSACTION_TYPE_FEE_APPLY, "Apply Fee on Account"),
(TRANSACTION_TYPE_PENALTY_APPLY, "Apply Penalty on Account"),
(TRANSACTION_TYPE_PRINCIPAL_POSTING, "Principal Posting"),
(TRANSACTION_TYPE_INTEREST_POSTING, "Interest Posting"),
(TRANSACTION_TYPE_FEE_POSTING, "Fee Posting"),
(TRANSACTION_TYPE_PENALTY_POSTING, "Penalty Posting"),
(TRANSACTION_TYPE_PRINCIPAL_WRITE_OFF, "Principal Write-Off"),
(TRANSACTION_TYPE_INTEREST_WRITE_OFF, "Interest Write-Off"),
(TRANSACTION_TYPE_FEE_WRITE_OFF, "Fee Write-Off"),
(TRANSACTION_TYPE_PENALTY_WRITE_OFF, "Penalty Write-Off"),
)
class LoanProduct(models.Model):
DECLINING_BALANCE = 1
INTEREST_CALCULATION_METHODS = (
(DECLINING_BALANCE, "Declining Balance"),
)
PERIOD_UNIT_DAYS = 1
PERIOD_UNIT_MONTHS = 2
PERIOD_UNITS = (
(1, 'Day'),
(2, 'Month'),
)
PERSONAL = 1
GROUP = 2
BUSINESS = 3
LOAN_TYPE = (
(PERSONAL, 'Personal'),
(GROUP, 'Group'),
(BUSINESS, 'Business'),
)
STATEMENT_ISSUE_FREQUENCY = (
(1, 'Monthly'),
(2, 'Quaterly'),
(3, 'Yearly'),
)
ROLE_CHOICES = (
(0, 'Finance Committee'),
(1, 'Audit and Supervision Committee'),
(2, 'Finance Committee'),
(3, 'General Manager/CEO'),
(4, 'Accountant'),
(5, 'Cashier'),
(6, 'Accountant\'s Assistant'),
(7, 'Member'),
)
PRINCIPAL_GRACE_PERIOD = 1
FULL_GRACE_PERIOD = 2
GRACE_PERIOD_TYPES = (
(1, "Principal Grace Period"),
(2, "Full Grace Period"),
)
ALLOCATION_CHOICE_FEE = 1
ALLOCATION_CHOICE_PENALTY = 2
ALLOCATION_CHOICE_INTEREST = 3
ALLOCATION_CHOICE_PRINCIPAL = 4
ALLOCATION_CHOICES = (
(ALLOCATION_CHOICE_FEE, "Fee"),
(ALLOCATION_CHOICE_PENALTY, "Penalty"),
(ALLOCATION_CHOICE_INTEREST, "Interest"),
(ALLOCATION_CHOICE_PRINCIPAL, "Principal"),
)
ALLOCATION_ORDER = [
ALLOCATION_CHOICE_FEE,
ALLOCATION_CHOICE_PENALTY,
ALLOCATION_CHOICE_INTEREST,
ALLOCATION_CHOICE_PRINCIPAL,
]
allocation_order = None
ACCOUNTING_RULES = [
#(Transaction Type, Debit, Credit)
(TRANSACTION_TYPE_LOAN_DISBURSAL, LOAN_PORTFOLIO_CONTROL_ACCOUNT, LOAN_FUND_SOURCE),
(TRANSACTION_TYPE_INTEREST_APPLY, INTEREST_INCOME_RECEIVABLE_ACCOUNT, INTEREST_INCOME_ACCOUNT),
(TRANSACTION_TYPE_FEE_APPLY, FEE_INCOME_RECEIVABLE_ACCOUNT, FEE_INCOME_ACCOUNT),
(TRANSACTION_TYPE_PENALTY_APPLY, PENALTY_INCOME_RECEIVABLE_ACCOUNT, PENALTY_INCOME_ACCOUNT),
(TRANSACTION_TYPE_PRINCIPAL_POSTING, LOAN_FUND_SOURCE, LOAN_PORTFOLIO_CONTROL_ACCOUNT),
(TRANSACTION_TYPE_INTEREST_POSTING, LOAN_FUND_SOURCE, INTEREST_INCOME_RECEIVABLE_ACCOUNT),
(TRANSACTION_TYPE_FEE_POSTING, LOAN_FUND_SOURCE, FEE_INCOME_RECEIVABLE_ACCOUNT),
(TRANSACTION_TYPE_PENALTY_POSTING, LOAN_FUND_SOURCE, PENALTY_INCOME_RECEIVABLE_ACCOUNT),
(TRANSACTION_TYPE_PRINCIPAL_WRITE_OFF, LOAN_WRITE_OFF_EXPENSE_ACCOUNT, LOAN_PORTFOLIO_CONTROL_ACCOUNT),
(TRANSACTION_TYPE_INTEREST_WRITE_OFF, LOAN_WRITE_OFF_EXPENSE_ACCOUNT, INTEREST_INCOME_RECEIVABLE_ACCOUNT),
(TRANSACTION_TYPE_FEE_WRITE_OFF, LOAN_WRITE_OFF_EXPENSE_ACCOUNT, FEE_INCOME_RECEIVABLE_ACCOUNT),
(TRANSACTION_TYPE_PENALTY_WRITE_OFF, LOAN_WRITE_OFF_EXPENSE_ACCOUNT, PENALTY_INCOME_RECEIVABLE_ACCOUNT),
]
name = models.CharField(max_length=90)
description = models.CharField(max_length=800)
is_active = models.BooleanField(default=False)
loan_type = models.IntegerField(choices=LOAN_TYPE, default=PERSONAL)
statement_issue_frequency = models.IntegerField(
choices=STATEMENT_ISSUE_FREQUENCY, default=1)
default_repayment_period = models.IntegerField(default=0)
default_grace_period = models.IntegerField(default=0)
period_unit = models.IntegerField(default=PERIOD_UNIT_MONTHS, choices=PERIOD_UNITS)
grace_period_type = models.IntegerField(default=FULL_GRACE_PERIOD, choices=GRACE_PERIOD_TYPES)
#Normally, most saccos can give you loans up to 3x your savings
loan_to_savings_ratio = models.IntegerField(default=3)
min_amount = models.DecimalField(decimal_places=2, max_digits=18, default=D(0.0))
max_amount = models.DecimalField(decimal_places=2, max_digits=18, default=D(0.0))
min_installments = models.IntegerField(null=True, blank=False)
max_installments = models.IntegerField(null=True, blank=False)
min_interest_rate = models.DecimalField(decimal_places=2, max_digits=4, default=D('0.0'))
max_interest_rate = models.DecimalField(decimal_places=2, max_digits=4, default=D('0.0'))
interest_calculation_method = models.IntegerField(
choices=INTEREST_CALCULATION_METHODS, default=DECLINING_BALANCE)
creation_date = models.DateTimeField(auto_now=True)
last_modified_date = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = "Loan Product"
verbose_name_plural = "Loan Products"
def __str__(self):
return self.name
class LoanProductFee(models.Model):
FEE_TYPES = (
(1, "Manual Fee"),
(2, "Late Repayment"),
(3, "Disbursal"),
(4, "Payment Due"),
)
name = models.CharField(max_length=50)
product = models.ForeignKey('LoanProduct', related_name="fees")
fee_type = models.IntegerField(choices=FEE_TYPES)
transaction_type = models.IntegerField(choices=TRANSACTION_TYPES)
class Meta:
verbose_name = "Loan Product Fee"
verbose_name_plural = "Loan Product Fees"
def __str__(self):
return self.name
class LoanProductExtraField(ExtraField):
product = models.ForeignKey('LoanProduct', related_name="product_extra_fields")
class Meta:
verbose_name = "Loan Product Custom Field"
verbose_name_plural = "Loan Product Custom Fields"
def __str__(self):
return self.field_name
class LoanAccount(models.Model):
'''
These are the Member Accounts.
They hold the low level member accounts only.
amount stored as cents: Balance effected every 4pm and 4am
Pending debits and credits are posted daily (to current balance),
and updated hourly
'''
PENDING_APPROVAL = 1
APPROVED = 2
ACTIVE = 3
ON_HOLD = 4
CLOSED = 5
SUSPENDED = 6
ACCOUNT_STATE = (
(PENDING_APPROVAL, "Pending Approval"),
(APPROVED, "Approved"),
(ACTIVE, "Active"),
(ON_HOLD, "On-Hold"),
(CLOSED, "Closed"),
(SUSPENDED, "Suspended"),
)
PERFOMING = 1
WATCH = 2
SUBSTANDARD = 3
DOUBTFUL = 4
LOSS = 5
LOAN_RISK_CATEGORIES = (
(PERFOMING, "Perfoming Loan"),
(WATCH, "Watch Loan (Past Due)"),
(SUBSTANDARD, "Substandard Loan"),
(DOUBTFUL, "Charged-off"),
(LOSS, "Loss"),
)
NO_DELINQUENCY = 1
CURRENT_DELINQUENCY = 2
HISTORICAL_DELINQUENCY = 3
DELINQUENCY_STATUS = (
(NO_DELINQUENCY, "No Delinquency"),
(CURRENT_DELINQUENCY, "Current Delinquency"),
(HISTORICAL_DELINQUENCY, "Historical Delinquent"),
)
account_number = models.CharField(unique=True, db_index=True, max_length=10)
product = models.ForeignKey('LoanProduct',
related_name="product_accounts", related_query_name="product_account")
holder = models.ForeignKey('Member',
related_name="loan_accounts", related_query_name="loan_account")
status = models.IntegerField(choices=ACCOUNT_STATE, default=PENDING_APPROVAL)
all_requirements_met = models.BooleanField(default=False)
amount = models.DecimalField(decimal_places=2, max_digits=18, default=D('0.0'))
repayment_period = models.IntegerField(default=0)
installments = models.IntegerField(default=0)
grace_period = models.IntegerField(default=0)
notes = models.CharField(max_length=400, null=True, blank=True)
#Current balance updated after transactions
current_balance = models.DecimalField(max_digits=18, decimal_places=2, default=D('0.0'))
current_balance_date = models.DateTimeField(null=True, blank=True)
#On Apraisal
loan_risk_category = models.IntegerField(choices=LOAN_RISK_CATEGORIES, default=PERFOMING)
deliquency_status = models.IntegerField(choices=DELINQUENCY_STATUS, default=NO_DELINQUENCY)
#Interest rate per period unit
interest_rate = models.DecimalField(max_digits=4, decimal_places=2, default=D('0.0'))
    #How much can we loan this member for this product?
    #This is useful for accounts such as mobile-based loans.
    #If this is null, we will use the product's max as the limit
credit_limit = models.DecimalField(decimal_places=2, max_digits=18, default=D('0.0'))
#Date stamped by actions
date_approved = models.DateTimeField(null=True, blank=True)
date_disbursed = models.DateTimeField(null=True, blank=True)
reason_disbursed = models.CharField(max_length=140, null=True, blank=True)
date_closed = models.DateTimeField(null=True, blank=True)
reason_closed = models.CharField(max_length=140, null=True, blank=True)
overdue_balance = models.DecimalField(max_digits=25, decimal_places=5, null=True, blank=True)
overdue_date = models.DateTimeField(null=True, blank=True)
last_repayment_amount = models.DecimalField(max_digits=25, decimal_places=5, null=True, blank=True)
last_repayment_date = models.DateTimeField(null=True, blank=True)
last_appraisal_date = models.DateTimeField(null=True, blank=True)
last_accruals_date = models.DateTimeField(null=True, blank=True)
last_restucture_date = models.DateTimeField(null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True, blank=True)
last_modified_date =models.DateTimeField(auto_now=True, blank=True)
objects = LoanAccountQuerySet.as_manager()
class Meta:
verbose_name = "Loan Account"
verbose_name_plural = "Loan Accounts"
def __str__(self):
return "#%s: %s for %s" % (self.account_number, self.product, self.holder)
@property
def period_unit_interest_rate(self):
if self.product.period_unit == LoanProduct.PERIOD_UNIT_MONTHS:
return (self.interest_rate / D('12.0'))
        elif self.product.period_unit == LoanProduct.PERIOD_UNIT_DAYS:
return (self.interest_rate / D('360.0'))
@property
def monthly_interest_rate(self):
        return self.period_unit_interest_rate
@property
def daily_interest_rate(self):
        return self.period_unit_interest_rate
class LoanAccountExtraField(ExtraField):
account = models.ForeignKey('LoanAccount', related_name="account_extra_fields")
class Meta:
verbose_name = "Loan Account Custom Field"
verbose_name_plural = "Loan Account Custom Fields"
class Security(models.Model):
COLLATERAL_ASSET = 1
GUARANTOR_GROUP = 2
SECURITY_TYPES = (
(COLLATERAL_ASSET, 'Collateral Asset'),
(GUARANTOR_GROUP, 'Guarantor Group')
)
PENDING_APPROVAL = 1
APPROVED = 2
ACTIVE = 3
CLOSED = 4
ON_HOLD = 5
RECOVERED = 6
SECURITY_STATUS = (
(PENDING_APPROVAL, "Pending Approval"),
(APPROVED, "Approved"),
(ACTIVE, "Active"),
(CLOSED, "Closed"),
(ON_HOLD, "On Hold"),
(RECOVERED, "Recovered"),
)
LAND = 1
VEHICLE = 2
GOVERNMENT_BONDS = 3
MACHINERY = 4
GUARANTORS_SAVINGS = 5
BUILDING = 6
COLLATERAL_ASSET_TYPES = (
(LAND, "Land"),
(VEHICLE, "Vehicle"),
(GOVERNMENT_BONDS, "Government Bonds"),
(MACHINERY, "Machinery"),
(GUARANTORS_SAVINGS, "Guarantor's Savings"),
(BUILDING, "Building"),
)
#Name for Collateral Asset
name = models.CharField(max_length=50, null=True)
#Loan Account secured
loan_account = models.ForeignKey('LoanAccount', related_name="securities", null=True)
#Type of security, for matching data
security_type = models.IntegerField(choices=SECURITY_TYPES, default=GUARANTOR_GROUP)
collateral_type = models.IntegerField(default=GUARANTORS_SAVINGS, choices=COLLATERAL_ASSET_TYPES)
status = models.IntegerField(choices=SECURITY_STATUS, default=PENDING_APPROVAL)
#We can put here the amount we are to recover from this security if defaulted
salvage_value = models.DecimalField(decimal_places=2, max_digits=18, default=D('0.0'))
last_valuation_date = models.DateTimeField(null=True)
#group guaranting loan, this can be null if its a collateral asset
guarantor_group = models.ForeignKey('core.Group', null=True)
#Loan Amount recovered since disbursal
amount_recovered = models.DecimalField(decimal_places=2, max_digits=18, default=D('0.0'))
#Amount
identification = models.CharField(max_length=50, null=True)
place_of_issue = models.CharField(max_length=50, null=True)
#Timestamp
date_created = models.DateTimeField(auto_now_add=True, null=True)
date_modified = models.DateTimeField(auto_now=True, null=True)
class Meta:
verbose_name = "Loan Security"
verbose_name_plural = "Loan Securities"
unique_together = ('security_type', 'loan_account')
class LoanTransactionEntry(models.Model):
'''
We separate all the loan product transactions from the GL transactions
'''
CREDIT = 0
DEBIT = 1
ITEM_TYPE = (
(CREDIT, 'Credit'),
(DEBIT, 'Debit'),
)
PENDING_POSTING = 0
POSTED = 1
ON_HOLD = 2
STATUS = (
(PENDING_POSTING, 'Pending Posting'),
(POSTED, 'Posted'),
(ON_HOLD, 'On Hold'),
)
#metadata
transaction_id = models.UUIDField()
transaction_type = models.IntegerField(choices=TRANSACTION_TYPES, null=True)
account = models.ForeignKey('LoanAccount', null=True,
related_name="account_transactions", related_query_name="account_transaction")
amount = models.DecimalField(decimal_places=2, max_digits=18, default=D('0.0'))
approved_by = models.ForeignKey('Member', related_name='approvals_on_loan_accounts', null=True, blank=True)
sms_sent = models.BooleanField(default=False)
email_sent = models.BooleanField(default=False)
creation_date = models.DateTimeField(auto_now_add=True)
#Transaction Data
item_type = models.IntegerField(choices=ITEM_TYPE)
status = models.IntegerField(default=0, choices=STATUS)
gl_account_code = models.CharField("Loan Ledger Account", max_length=20, null=True, choices=LOAN_CHART_OF_ACCOUNTS)
ledger_balance_increment = models.DecimalField(decimal_places=2, max_digits=18, default=D('0.0'))
posting_date = models.DateTimeField(null=True, blank=True)
objects = LoanTransactionEntryQuerySet.as_manager()
class Meta:
verbose_name = "Loan Transaction Entry"
verbose_name_plural = "Loan Transaction Entries"
def __str__(self):
return "%s transaction, %s item on GL account #%s" % (
self.get_transaction_type_display(),
self.get_item_type_display(),
self.gl_account_code
)
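# Illustration only (not used by the models): resolving the double-entry ledger
# accounts for a transaction type from LoanProduct.ACCOUNTING_RULES.
def _example_accounts_for(transaction_type):
    """Return the (debit_account, credit_account) pair for a transaction type."""
    for rule_type, debit_account, credit_account in LoanProduct.ACCOUNTING_RULES:
        if rule_type == transaction_type:
            return debit_account, credit_account
    raise ValueError("No accounting rule for transaction type %s" % transaction_type)

# e.g. _example_accounts_for(TRANSACTION_TYPE_LOAN_DISBURSAL) yields
# (LOAN_PORTFOLIO_CONTROL_ACCOUNT, LOAN_FUND_SOURCE).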
| apache-2.0 | 418,958,189,437,914,500 | 34.723112 | 119 | 0.671386 | false |
dloureiro/sf-crm-utils | sf-crm-addDocument.py | 1 | 3153 | #!/usr/bin/env python
#
# sf-crm-addDocument.py
# Copyright (C) <year> <name of author>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sugarcrm
import xlwt
import datetime
import os.path
import json
import pprint
from optparse import OptionParser
import base64
VERSION = "1.0"
def main():
parser = OptionParser(usage="usage: sf-crm-addDocument filepath",
version="sf-crm-addDocument " + VERSION)
(options, args) = parser.parse_args()
document = args[0]
fileName = os.path.basename(document)
path = os.path.dirname(document)
ofCode = os.path.basename(path)
#print "ofCode " + ofCode
#print "filename " + fileName
home = os.path.expanduser("~")
configFile=os.path.join(home,".sugar2xls.config")
if not os.path.isfile(configFile) :
print "Error : config file " + configFile + " does not exist"
exit()
config = json.load(open(configFile,"r"))
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(config)
# This is the URL for the v4 REST API in your SugarCRM server.
url = config["url"]
username = config["username"]
password = config["password"]
# This way you log-in to your SugarCRM instance.
conn = sugarcrm.Sugarcrm(url, username, password)
#data = conn.get_module_fields("Revisions")
#pp.pprint(data)
#exit()
print "Recuperation de l'opportunite correspondant a l'OF " + ofCode
#pp.pprint(aDocument)
query = conn.modules["Opportunities"].query()
opp = query.filter(of_code_c__contains=ofCode)
opportunity = opp[0]
# This new query has a filter. Please notice that the filter parameter is the
# field name in the SugarCRM module, followed by a double underscore, and then
# an operator (it can be 'exact', 'contains', 'gt', 'gte', 'lt', 'lte' or 'in').
query = conn.modules['Documents'].query()
print "Creation d'un document"
aDocument = sugarcrm.SugarEntry(conn.modules["Documents"])
aDocument["name"] = fileName
aDocument["filename"]=fileName
aDocument["document_name"]=fileName
aDocument.save()
print aDocument["id"]
    with open(document, 'rb') as f:  # binary mode so base64 encoding works for any file type
encoded = base64.b64encode(f.read())
print "encoded : " + encoded
conn.set_document_revision({"id": aDocument["id"],
"revision":1,
"filename":fileName + "-test",
"file":encoded})
print "Liaison entre le doc cree et "+ opportunity["name"]
aDocument.relate(opportunity)
if __name__ == '__main__':
main()
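# Illustrative note (added): the script expects a JSON config file at
# ~/.sugar2xls.config containing the keys read in main(). The exact contents
# below are an assumed example, not taken from the project documentation:
#
#     {"url": "https://crm.example.com/service/v4_1/rest.php",
#      "username": "admin",
#      "password": "secret"}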
| gpl-3.0 | -5,350,635,180,503,216,000 | 28.194444 | 85 | 0.664129 | false |
nikoladimitroff/Zmey | Tools/BlenderPlugins/scripts/addons/io_scene_gltf2/gltf2_get.py | 1 | 11682 | # Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import bpy
import os
from .gltf2_debug import *
#
# Globals
#
#
# Functions
#
def get_used_materials():
"""
Gathers and returns all unfiltered, valid Blender materials.
"""
materials = []
for blender_material in bpy.data.materials:
if blender_material.node_tree and blender_material.use_nodes:
for currentNode in blender_material.node_tree.nodes:
if isinstance(currentNode, bpy.types.ShaderNodeGroup):
if currentNode.node_tree.name.startswith('glTF Metallic Roughness'):
materials.append(blender_material)
elif currentNode.node_tree.name.startswith('glTF Specular Glossiness'):
materials.append(blender_material)
else:
materials.append(blender_material)
return materials
def get_material_requires_texcoords(glTF, index):
"""
    Query function: does the material need texture coordinates? This is the case if a texture is present and used.
"""
if glTF.get('materials') is None:
return False
materials = glTF['materials']
if index < 0 or index >= len(materials):
return False
material = materials[index]
# General
if material.get('emissiveTexture') is not None:
return True
if material.get('normalTexture') is not None:
return True
if material.get('occlusionTexture') is not None:
return True
# Metallic roughness
if material.get('baseColorTexture') is not None:
return True
if material.get('metallicRoughnessTexture') is not None:
return True
# Specular glossiness
if material.get('diffuseTexture') is not None:
return True
if material.get('specularGlossinessTexture') is not None:
return True
# Common Material
if material.get('diffuseTexture') is not None:
return True
if material.get('specularTexture') is not None:
return True
if material.get('shininessTexture') is not None:
return True
if material.get('ambientTexture') is not None:
return True
return False
def get_material_requires_normals(glTF, index):
"""
    Query function: does the material need normals? This is the case if a texture is present and used.
    At the time of writing, this is the same check as for texture coordinates.
"""
return get_material_requires_texcoords(glTF, index)
def get_image_index(export_settings, uri):
"""
Return the image index in the glTF array.
"""
if export_settings['gltf_uri'] is None:
return -1
if uri in export_settings['gltf_uri']:
return export_settings['gltf_uri'].index(uri)
return -1
def get_texture_index_by_filepath(export_settings, glTF, filepath):
"""
Return the texture index in the glTF array by a given filepath.
"""
if filepath is None:
return -1
uri = get_uri(filepath)
if export_settings['gltf_uri'] is None:
return -1
if glTF.get('textures') is None:
return -1
image_uri = export_settings['gltf_uri']
index = 0
for texture in glTF['textures']:
current_image_uri = image_uri[texture['source']]
if current_image_uri == uri:
return index
index += 1
return -1
def get_texture_index(export_settings, glTF, name, shader_node_group):
"""
Return the texture index in the glTF array.
"""
if shader_node_group is None:
return -1
if not isinstance(shader_node_group, bpy.types.ShaderNodeGroup):
return -1
if shader_node_group.inputs.get(name) is None:
return -1
if len(shader_node_group.inputs[name].links) == 0:
return -1
from_node = shader_node_group.inputs[name].links[0].from_node
#
if not isinstance(from_node, bpy.types.ShaderNodeTexImage):
return -1
if from_node.image is None or from_node.image.size[0] == 0 or from_node.image.size[1] == 0:
return -1
return get_texture_index_by_filepath(export_settings, glTF, from_node.image.filepath)
def get_texcoord_index(glTF, name, shader_node_group):
"""
    Return the texture coordinate index, if assigned and used.
"""
if shader_node_group is None:
return 0
if not isinstance(shader_node_group, bpy.types.ShaderNodeGroup):
return 0
if shader_node_group.inputs.get(name) is None:
return 0
if len(shader_node_group.inputs[name].links) == 0:
return 0
from_node = shader_node_group.inputs[name].links[0].from_node
#
if not isinstance(from_node, bpy.types.ShaderNodeTexImage):
return 0
#
if len(from_node.inputs['Vector'].links) == 0:
return 0
input_node = from_node.inputs['Vector'].links[0].from_node
if not isinstance(input_node, bpy.types.ShaderNodeUVMap):
return 0
if input_node.uv_map == '':
return 0
#
# Try to gather map index.
for blender_mesh in bpy.data.meshes:
texCoordIndex = blender_mesh.uv_textures.find(input_node.uv_map)
if texCoordIndex >= 0:
return texCoordIndex
return 0
def get_material_index(glTF, name):
"""
Return the material index in the glTF array.
"""
if name is None:
return -1
if glTF.get('materials') is None:
return -1
index = 0
for material in glTF['materials']:
if material['name'] == name:
return index
index += 1
return -1
def get_mesh_index(glTF, name):
"""
Return the mesh index in the glTF array.
"""
if glTF.get('meshes') is None:
return -1
index = 0
for mesh in glTF['meshes']:
if mesh['name'] == name:
return index
index += 1
return -1
def get_skin_index(glTF, name, index_offset):
"""
Return the skin index in the glTF array.
"""
if glTF.get('skins') is None:
return -1
skeleton = get_node_index(glTF, name)
index = 0
for skin in glTF['skins']:
if skin['skeleton'] == skeleton:
return index + index_offset
index += 1
return -1
def get_camera_index(glTF, name):
"""
Return the camera index in the glTF array.
"""
if glTF.get('cameras') is None:
return -1
index = 0
for camera in glTF['cameras']:
if camera['name'] == name:
return index
index += 1
return -1
def get_light_index_cmn(glTF, name):
"""
Return the light index in the glTF array.
"""
if glTF.get('extensions') is None:
return -1
extensions = glTF['extensions']
if extensions.get('KHR_lights_cmn') is None:
return -1
khr_lights_cmn = extensions['KHR_lights_cmn']
if khr_lights_cmn.get('lights') is None:
return -1
lights = khr_lights_cmn['lights']
index = 0
for light in lights:
if light['name'] == name:
return index
index += 1
return -1
def get_light_index_pbr(glTF, name):
"""
Return the light index in the glTF array.
"""
if glTF.get('extensions') is None:
return -1
extensions = glTF['extensions']
if extensions.get('KHR_lights_pbr') is None:
return -1
khr_lights_pbr = extensions['KHR_lights_pbr']
if khr_lights_pbr.get('lights') is None:
return -1
lights = khr_lights_pbr['lights']
index = 0
for light in lights:
if light['name'] == name:
return index
index += 1
return -1
def get_node_index(glTF, name):
"""
Return the node index in the glTF array.
"""
if glTF.get('nodes') is None:
return -1
index = 0
for node in glTF['nodes']:
if node['name'] == name:
return index
index += 1
return -1
def get_scene_index(glTF, name):
"""
Return the scene index in the glTF array.
"""
if glTF.get('scenes') is None:
return -1
index = 0
for scene in glTF['scenes']:
if scene['name'] == name:
return index
index += 1
return -1
def get_uri(filepath):
"""
Return the final PNG uri depending on a filepath.
"""
return os.path.splitext(bpy.path.basename(filepath))[0] + '.png'
def get_node(data_path):
"""
Return Blender node on a given Blender data path.
"""
if data_path is None:
return None
index = data_path.find("[\"")
if (index == -1):
return None
node_name = data_path[(index + 2):]
index = node_name.find("\"")
if (index == -1):
return None
return node_name[:(index)]
def get_data_path(data_path):
"""
Return Blender data path.
"""
index = data_path.rfind('.')
if index == -1:
return data_path
return data_path[(index + 1):]
def get_scalar(default_value, init_value = 0.0):
"""
Return scalar with a given default/fallback value.
"""
return_value = init_value
if default_value is None:
return return_value
return_value = default_value
return return_value
def get_vec2(default_value, init_value = [0.0, 0.0]):
"""
Return vec2 with a given default/fallback value.
"""
return_value = init_value
if default_value is None or len(default_value) < 2:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 2:
return return_value
return return_value
def get_vec3(default_value, init_value = [0.0, 0.0, 0.0]):
"""
Return vec3 with a given default/fallback value.
"""
return_value = init_value
if default_value is None or len(default_value) < 3:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 3:
return return_value
return return_value
def get_vec4(default_value, init_value = [0.0, 0.0, 0.0, 1.0]):
"""
Return vec4 with a given default/fallback value.
"""
return_value = init_value
if default_value is None or len(default_value) < 4:
return return_value
index = 0
for number in default_value:
return_value[index] = number
index += 1
if index == 4:
return return_value
return return_value
def get_index(list, name):
"""
Return index of a glTF element by a given name.
"""
if list is None or name is None:
return -1
index = 0
for element in list:
if element.get('name') is None:
return -1
if element['name'] == name:
return index
index += 1
return -1
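# Illustrative examples (added for clarity, not part of the exporter):
# get_vec3 falls back to init_value when the input is missing or too short,
# and get_index searches a glTF list by its 'name' property.
#
#     get_vec3([0.2, 0.4])                      # -> [0.0, 0.0, 0.0] (too short, fallback kept)
#     get_vec3([0.2, 0.4, 0.6])                 # -> [0.2, 0.4, 0.6]
#     get_index([{'name': 'Cube'}], 'Cube')     # -> 0
#     get_index([{'name': 'Cube'}], 'Missing')  # -> -1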
| mit | 986,731,978,539,314,600 | 20.086643 | 114 | 0.586715 | false |
chadspratt/AveryDB | join.py | 1 | 1126 | """Contains a simple class for storing join configurations."""
##
# Copyright 2013 Chad Spratt
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
class Join(object):
"""A struct for storing a join definition."""
def __init__(self, joinalias, jointable, joinfield,
targetalias, targettable, targetfield,
inner=False):
self.joinalias = joinalias
self.jointable = jointable
self.joinfield = joinfield
self.targetalias = targetalias
self.targettable = targettable
self.targetfield = targetfield
self.inner = inner
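# Illustrative usage (added; the aliases, table and field names below are made up):
#
#     j = Join('addr', 'addresses.dbf', 'OWNER_ID',
#              'own', 'owners.dbf', 'ID', inner=True)
#     # j.joinfield on the join table is matched against j.targetfield on the target table.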
| apache-2.0 | -2,738,151,809,143,295,000 | 37.827586 | 76 | 0.686501 | false |
ningirsu/stepmania-server | smserver/resources/user_resource.py | 1 | 3748 | """ User resources """
import hashlib
import sqlalchemy
from smserver import exceptions
from smserver import models
from smserver.resources import base
class UserResource(base.BaseResource):
""" User class resource """
MIN_PASSWORD_LENGTH = 8
def create(self, name, password, email=None, rank=1):
""" Create a new user """
if len(password) < self.MIN_PASSWORD_LENGTH:
raise exceptions.ValidationError(self.token, "Password too short")
if rank > 1 and rank > self.connection.level():
raise exceptions.Unauthorized(
self.token,
"Unauthorized to create user with rank %s" % rank
)
password = hashlib.sha256(password.encode('utf-8')).hexdigest()
user = models.User(
name=name,
password=password,
email=email,
rank=rank
)
try:
self.session.add(user)
self.session.commit()
except sqlalchemy.exc.IntegrityError:
self.session.rollback()
raise exceptions.ValidationError(self.token, "User already exist")
self.log.info("%s create user %s", self.token, user.id)
return user
def login(self, name, password):
""" Login a new user in our database """
if len(password) < self.MIN_PASSWORD_LENGTH:
raise exceptions.Forbidden(self.token, "Invalid name or password")
password = hashlib.sha256(password.encode('utf-8')).hexdigest()
user = (self.session
.query(models.User)
.filter_by(name=name, password=password)
.one_or_none())
if not user:
raise exceptions.Forbidden(self.token, "Invalid name or password")
if models.Ban.is_ban(self.session, user_id=user.id):
raise exceptions.Forbidden(self.token, "User ban from this server")
self.log.info("%s successfully login user %s", self.token, name)
return user
def login_or_create(self, name, password):
""" Login or create a user if the name don't exists """
if len(password) < self.MIN_PASSWORD_LENGTH:
raise exceptions.Forbidden(self.token, "Invalid name or password")
user = (self.session
.query(models.User)
.filter_by(name=name)
.one_or_none())
if not user:
return self.create(name, password)
return self.login(name, password)
def connect(self, user, pos=0):
""" Connect the user in the given position """
if user.online and user not in self.connection.users:
raise exceptions.Unauthorized(self.token, "User already online")
nb_onlines = models.User.nb_onlines(self.session)
max_users = self.serv.config.server.get("max_users", -1)
if max_users > 0 and nb_onlines >= max_users:
raise exceptions.Unauthorized(
self.token,
"nb max users reaches (%s/%s)" % (nb_onlines, max_users)
)
for online_user in self.connection.active_users:
if online_user.pos == pos and online_user != user:
self.log.info("%s log out user %s", self.token, online_user.name)
online_user.pos = None
online_user.online = False
user.pos = pos
user.online = True
user.connection = self.connection
user.room_id = self.connection.room_id
user.last_ip = self.connection.ip
user.client_name = self.connection.client_name
user.client_version = self.connection.client_version
self.serv.send_sd_running_status(session=self.session)
return user
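    # Illustrative flow (added for clarity; assumes a controller already holds a
    # UserResource instance named `res` for the current connection):
    #
    #     user = res.login_or_create("alice", "s3cretpassword")
    #     res.connect(user, pos=0)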
| mit | -8,784,744,963,530,073,000 | 31.034188 | 81 | 0.592316 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/aio/operations/_usages_operations.py | 1 | 5201 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["_models.UsagesListResult"]:
"""List network usages for a subscription.
:param location: The location where resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsagesListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.UsagesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsagesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._ ]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsagesListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'} # type: ignore
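    # Illustrative usage (added; not part of the generated client). Assumes the
    # async NetworkManagementClient with valid credentials:
    #
    #     async with NetworkManagementClient(credential, subscription_id) as client:
    #         async for usage in client.usages.list("westus"):
    #             print(usage.name.value, usage.current_value, usage.limit)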
| mit | 6,820,042,527,769,866,000 | 45.4375 | 134 | 0.637954 | false |
FederatedAI/FATE | examples/dsl/v1/experiment/generate_mock_data.py | 1 | 6367 | import random
import sys
import numpy as np
SAMPLE_NUM = 10000
# For sparse data, it means how many non-zero features in one sample.
# The total possible feature num depends on your tag interval below.
FEATURE_NUM = 20
TAG_INTERVAL = (2019120799, 2019121299)
VALUE_INTERVAL = (0, 10000)
# SAVE_FILE_NAME = DATA_TYPE + "_" + str(SAMPLE_NUM) + "_" + str(FEATURE_NUM) + ".csv"
def generate_tag_1_data(ids):
if len(ids) != SAMPLE_NUM:
raise ValueError("len ids should equal to sample number")
counter = 0
for sample_i in range(SAMPLE_NUM):
one_data = [ids[sample_i]]
valid_set = [x for x in range(TAG_INTERVAL[0], TAG_INTERVAL[1])]
features = np.random.choice(valid_set, FEATURE_NUM, replace=False)
one_data += [":".join([x, "1.0"]) for x in features]
counter += 1
if counter % 10000 == 0:
print("generate data {}".format(counter))
yield one_data
def generate_str_data(ids):
if len(ids) != SAMPLE_NUM:
raise ValueError("len ids should equal to sample number")
header = ['id', 'y'] + ['x' + str(i) for i in range(FEATURE_NUM)]
yield header
counter = 0
for sample_i in range(SAMPLE_NUM):
valid_set = ["test" + str(x) for x in range(0, FEATURE_NUM)]
features = np.random.choice(valid_set, FEATURE_NUM, replace=False)
one_data = [ids[sample_i], round(random.random())] + list(features)
counter += 1
if counter % 10000 == 0:
print("generate data {}".format(counter))
yield one_data
def generate_tag_float_value_data(ids):
if len(ids) != SAMPLE_NUM:
raise ValueError("len ids should equal to sample number")
counter = 0
for sample_i in range(SAMPLE_NUM):
one_data = [ids[sample_i]]
valid_set = [x for x in range(TAG_INTERVAL[0], TAG_INTERVAL[1])]
tags = np.random.choice(valid_set, FEATURE_NUM, replace=False)
values = 100 * np.random.random(FEATURE_NUM)
one_data += [":".join([str(tags[i]), str(round(values[i], 2))]) for i in range(FEATURE_NUM)]
counter += 1
if counter % 10000 == 0:
print("generate data {}".format(counter))
yield one_data
def generate_tag_data(ids):
if len(ids) != SAMPLE_NUM:
raise ValueError("len ids should equal to sample number")
counter = 0
for sample_i in range(SAMPLE_NUM):
one_data = [ids[sample_i]]
valid_set = [x for x in range(TAG_INTERVAL[0], TAG_INTERVAL[1])]
tags = np.random.choice(valid_set, FEATURE_NUM, replace=False)
one_data += [str(tag) for tag in tags]
counter += 1
if counter % 10000 == 0:
print("generate data {}".format(counter))
yield one_data
def generate_tag_value_data(ids):
if len(ids) != SAMPLE_NUM:
raise ValueError("len ids should equal to sample number")
counter = 0
for sample_i in range(SAMPLE_NUM):
one_data = [ids[sample_i]]
for feature_i in range(FEATURE_NUM):
tag = str(random.randint(TAG_INTERVAL[0], TAG_INTERVAL[1]))
value = str(random.randint(VALUE_INTERVAL[0], VALUE_INTERVAL[1]))
tag_value = ":".join([tag, value])
one_data.append(tag_value)
counter += 1
if counter % 10000 == 0:
print("generate data {}".format(counter))
yield one_data
def generate_label_data(ids):
if len(ids) != SAMPLE_NUM:
raise ValueError("len ids should equal to sample number")
header = ['id', 'y'] + ['x' + str(i) for i in range(FEATURE_NUM)]
yield header
counter = 0
for sample_i in range(SAMPLE_NUM):
one_data = [ids[sample_i], round(random.random())] + list(np.random.random(FEATURE_NUM))
counter += 1
if counter % 10000 == 0:
print("generate data {}".format(counter))
yield one_data
def generate_non_label_data(ids):
if len(ids) != SAMPLE_NUM:
raise ValueError("len ids should equal to sample number")
header = ['id'] + ['x' + str(i) for i in range(FEATURE_NUM)]
yield header
counter = 0
for sample_i in range(SAMPLE_NUM):
one_data = [ids[sample_i]] + list(np.random.random(FEATURE_NUM))
counter += 1
if counter % 10000 == 0:
print("generate data {}".format(counter))
yield one_data
def read_file(file, has_header=False):
header = None
data = []
with open(file, "r") as fin:
if has_header:
header = fin.readline().replace('\n', '')
line = fin.readline()
while True:
split_line = line.replace("\n", '').split(",")
data.append(split_line)
line = fin.readline()
if not line:
break
return header, data
def save_file(file, data, header=None, delimitor=','):
with open(file, 'w') as fout:
if header:
fout.write("".join([header, '\n']))
for d in data:
d = list(map(str, d))
fout.write(d[0] + ',' + delimitor.join(d[1:]) + "\n")
if __name__ == "__main__":
# ids = [_data[0] for _data in ids_data]
DATA_TYPE = sys.argv[1]
role = sys.argv[2]
SAVE_FILE_NAME = "generated_data_{}.csv".format(role)
ids = [x for x in range(SAMPLE_NUM)]
if DATA_TYPE == 'tag_1':
new_data = generate_tag_1_data(ids)
save_file(SAVE_FILE_NAME, new_data, delimitor=',')
if DATA_TYPE == 'tag_float_value':
new_data = generate_tag_float_value_data(ids)
save_file(SAVE_FILE_NAME, new_data, delimitor=';')
if DATA_TYPE == 'tag':
new_data = generate_tag_data(ids)
save_file(SAVE_FILE_NAME, new_data, delimitor=';')
if DATA_TYPE == 'tag_integer_value':
new_data = generate_tag_value_data(ids)
save_file(SAVE_FILE_NAME, new_data, delimitor=',')
if DATA_TYPE == 'label':
new_data = generate_label_data(ids)
save_file(SAVE_FILE_NAME, new_data, delimitor=',')
if DATA_TYPE == 'non_label':
new_data = generate_non_label_data(ids)
save_file(SAVE_FILE_NAME, new_data, delimitor=',')
if DATA_TYPE == 'str':
new_data = generate_str_data(ids)
save_file(SAVE_FILE_NAME, new_data, delimitor=',')
print("finish generate data , save data in {}".format(SAVE_FILE_NAME))
| apache-2.0 | 7,247,381,461,808,039,000 | 29.610577 | 100 | 0.580179 | false |
wangjun/python-pinyin | pypinyin/__init__.py | 1 | 14031 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""汉语拼音转换工具."""
from __future__ import unicode_literals
from copy import deepcopy
from itertools import chain
import os
import re
import sys
from . import phonetic_symbol, pinyin_dict
__title__ = 'pypinyin'
__version__ = '0.9.0'
__author__ = 'mozillazg, 闲耘'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2014 mozillazg, 闲耘'
__all__ = [
'pinyin', 'lazy_pinyin', 'slug',
'STYLE_NORMAL', 'NORMAL',
'STYLE_TONE', 'TONE',
'STYLE_TONE2', 'TONE2',
'STYLE_INITIALS', 'INITIALS',
'STYLE_FINALS', 'FINALS',
'STYLE_FINALS_TONE', 'FINALS_TONE',
'STYLE_FINALS_TONE2', 'FINALS_TONE2',
'STYLE_FIRST_LETTER', 'FIRST_LETTER'
]
# fix "TypeError: Item in ``from list'' not a string" in Python 2
__all__ = [str(x) for x in __all__]
PY2 = sys.version_info < (3, 0)
if not PY2:
unicode = str
str = bytes
callable = lambda x: getattr(x, '__call__', None)
# Phrase (word-level) pinyin dictionary
if os.environ.get('PYPINYIN_NO_PHRASES'):
PHRASES_DICT = {}
else:
from . import phrases_dict
PHRASES_DICT = phrases_dict.phrases_dict.copy()
# Single-character pinyin dictionary
PINYIN_DICT = pinyin_dict.pinyin_dict.copy()
# Table of initials (shengmu)
_INITIALS = 'b,p,m,f,d,t,n,l,g,k,h,j,q,x,zh,ch,sh,r,z,c,s'.split(',')
# Mapping from tone-marked characters to digit-tone notation, e.g.: {u'ā': 'a1'}
PHONETIC_SYMBOL = phonetic_symbol.phonetic_symbol.copy()
# All tone-marked characters
re_phonetic_symbol_source = ''.join(PHONETIC_SYMBOL.keys())
# Regular expression matching tone-marked characters
RE_PHONETIC_SYMBOL = r'[' + re.escape(re_phonetic_symbol_source) + r']'
# Regular expression matching characters whose tone is written as a digit
RE_TONE2 = r'([aeoiuvnm])([0-4])$'
# Han characters that have pinyin
RE_HANS = re.compile(r'^(?:[\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff])+$')
# Characters without pinyin
RE_NONE_HANS = re.compile(r'^(?:[^\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff])+$')
# Pinyin styles
PINYIN_STYLE = {
    'NORMAL': 0,        # plain style, no tone marks
    'TONE': 1,          # standard style, tone mark on the first vowel of the final
    'TONE2': 2,         # tone written after the pinyin as a digit 1~4
    'INITIALS': 3,      # keep only the initial (shengmu)
    'FIRST_LETTER': 4,  # keep only the first letter
    'FINALS': 5,        # keep only the final (yunmu), no tone
    'FINALS_TONE': 6,   # keep only the final, with tone mark
    'FINALS_TONE2': 7,  # keep only the final, tone as a digit 1~4 after the pinyin
}
# plain style, no tone marks
NORMAL = STYLE_NORMAL = PINYIN_STYLE['NORMAL']
# standard style, tone mark on the first vowel of the final
TONE = STYLE_TONE = PINYIN_STYLE['TONE']
# tone written after the pinyin as a digit 1~4
TONE2 = STYLE_TONE2 = PINYIN_STYLE['TONE2']
# keep only the initial (shengmu)
INITIALS = STYLE_INITIALS = PINYIN_STYLE['INITIALS']
# keep only the first letter
FIRST_LETTER = STYLE_FIRST_LETTER = PINYIN_STYLE['FIRST_LETTER']
# keep only the final (yunmu), no tone
FINALS = STYLE_FINALS = PINYIN_STYLE['FINALS']
# keep only the final, with tone mark
FINALS_TONE = STYLE_FINALS_TONE = PINYIN_STYLE['FINALS_TONE']
# keep only the final, tone as a digit 1~4 after the pinyin
FINALS_TONE2 = STYLE_FINALS_TONE2 = PINYIN_STYLE['FINALS_TONE2']
def _seg(chars):
"""按是否是汉字进行分词"""
s = '' # 保存一个词
ret = [] # 分词结果
flag = 0 # 上一个字符是什么? 0: 汉字, 1: 不是汉字
for n, c in enumerate(chars):
if RE_HANS.match(c): # 汉字, 确定 flag 的初始值
if n == 0: # 第一个字符
flag = 0
if flag == 0:
s += c
else: # 上一个字符不是汉字, 分词
ret.append(s)
flag = 0
s = c
else: # 不是汉字
if n == 0: # 第一个字符, 确定 flag 的初始值
flag = 1
if flag == 1:
s += c
else: # 上一个字符是汉字, 分词
ret.append(s)
flag = 1
s = c
ret.append(s) # 最后的词
return ret
def simple_seg(hans):
    'Split the incoming string by whether the characters have pinyin or not.'
assert not isinstance(hans, str), \
'must be unicode string or [unicode, ...] list'
if isinstance(hans, unicode):
return _seg(hans)
else:
hans = list(hans)
if len(hans) == 1:
return simple_seg(hans[0])
return list(chain(*[simple_seg(x) for x in hans]))
def seg(hans):
if getattr(seg, 'no_jieba', None):
ret = hans
return simple_seg(ret)
if seg.jieba is None:
try:
import jieba
seg.jieba = jieba
except ImportError:
seg.no_jieba = True
return seg(hans)
else:
hans = simple_seg(hans)
ret = []
for x in hans:
            if RE_NONE_HANS.match(x):  # character without pinyin; skip the second segmentation pass
ret.append(x)
else:
ret.extend(list(seg.jieba.cut(x)))
return ret
seg.jieba = None
if os.environ.get('PYPINYIN_NO_JIEBA'):
seg.no_jieba = True
def load_single_dict(pinyin_dict):
"""载入用户自定义的单字拼音库
:param pinyin_dict: 单字拼音库。比如: ``{0x963F: u"ā,ē"}``
:type pinyin_dict: dict
"""
PINYIN_DICT.update(pinyin_dict)
def load_phrases_dict(phrases_dict):
"""载入用户自定义的词语拼音库
:param phrases_dict: 词语拼音库。比如: ``{u"阿爸": [[u"ā"], [u"bà"]]}``
:type phrases_dict: dict
"""
PHRASES_DICT.update(phrases_dict)
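# Illustrative usage (added): register a custom phrase reading before calling
# ``pinyin``; the example data mirrors the docstring above.
#
#     load_phrases_dict({u"阿爸": [[u"ā"], [u"bà"]]})
#     pinyin(u"阿爸")  # -> [[u"ā"], [u"bà"]]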
def initial(pinyin):
"""获取单个拼音中的声母.
:param pinyin: 单个拼音
:type pinyin: unicode
:return: 声母
:rtype: unicode
"""
for i in _INITIALS:
if pinyin.startswith(i):
return i
return ''
def final(pinyin):
"""获取单个拼音中的韵母.
:param pinyin: 单个拼音
:type pinyin: unicode
:return: 韵母
:rtype: unicode
"""
initial_ = initial(pinyin) or None
if not initial_:
return pinyin
return ''.join(pinyin.split(initial_, 1))
def toFixed(pinyin, style):
"""根据拼音风格格式化带声调的拼音.
:param pinyin: 单个拼音
:param style: 拼音风格
:return: 根据拼音风格格式化后的拼音字符串
:rtype: unicode
"""
# 声母
if style == INITIALS:
return initial(pinyin)
def _replace(m):
symbol = m.group(0) # 带声调的字符
# 不包含声调
if style in [NORMAL, FIRST_LETTER, FINALS]:
# 去掉声调: a1 -> a
return re.sub(RE_TONE2, r'\1', PHONETIC_SYMBOL[symbol])
# 使用数字标识声调
elif style in [TONE2, FINALS_TONE2]:
# 返回使用数字标识声调的字符
return PHONETIC_SYMBOL[symbol]
# 声调在头上
else:
return symbol
# 替换拼音中的带声调字符
py = re.sub(RE_PHONETIC_SYMBOL, _replace, pinyin)
# 首字母
if style == FIRST_LETTER:
py = py[0]
# 韵母
elif style in [FINALS, FINALS_TONE, FINALS_TONE2]:
py = final(py)
return py
def _handle_nopinyin_char(chars, errors='default'):
"""处理没有拼音的字符"""
if callable(errors):
return errors(chars)
if errors == 'default':
return chars
elif errors == 'ignore':
return None
elif errors == 'replace':
if len(chars) > 1:
return ''.join(unicode('%x' % ord(x)) for x in chars)
else:
return unicode('%x' % ord(chars))
def handle_nopinyin(chars, errors='default'):
py = _handle_nopinyin_char(chars, errors=errors)
if not py:
return []
if isinstance(py, list):
return py
else:
return [py]
def single_pinyin(han, style, heteronym, errors='default'):
"""单字拼音转换.
:param han: 单个汉字
:param errors: 指定如何处理没有拼音的字符,详情请参考
:py:func:`~pypinyin.pinyin`
:return: 返回拼音列表,多音字会有多个拼音项
:rtype: list
"""
num = ord(han)
    # handle characters without pinyin
    if num not in PINYIN_DICT:
        return handle_nopinyin(han, errors=errors)
    pys = PINYIN_DICT[num].split(",")  # list of readings for this character
    if not heteronym:
        return [toFixed(pys[0], style)]
    # output the multiple readings of a heteronym
    # cache pinyin already seen, to avoid duplicates when a heteronym is
    # converted to a style without tone marks
py_cached = {}
pinyins = []
for i in pys:
py = toFixed(i, style)
if py in py_cached:
continue
py_cached[py] = py
pinyins.append(py)
return pinyins
def phrases_pinyin(phrases, style, heteronym, errors='default'):
"""词语拼音转换.
:param phrases: 词语
:param errors: 指定如何处理没有拼音的字符
:return: 拼音列表
:rtype: list
"""
py = []
if phrases in PHRASES_DICT:
py = deepcopy(PHRASES_DICT[phrases])
for idx, item in enumerate(py):
py[idx] = [toFixed(item[0], style=style)]
else:
for i in phrases:
single = single_pinyin(i, style=style, heteronym=heteronym,
errors=errors)
if single:
py.append(single)
return py
def _pinyin(words, style, heteronym, errors):
pys = []
    # first pass: filter out characters without pinyin
if RE_HANS.match(words):
pys = phrases_pinyin(words, style=style, heteronym=heteronym,
errors=errors)
return pys
for word in simple_seg(words):
if not (RE_HANS.match(word)):
py = handle_nopinyin(word, errors=errors)
pys.append(py) if py else None
else:
pys.extend(_pinyin(word, style, heteronym, errors))
return pys
def pinyin(hans, style=TONE, heteronym=False, errors='default'):
"""将汉字转换为拼音.
:param hans: 汉字字符串( ``u'你好吗'`` )或列表( ``[u'你好', u'吗']`` ).
如果用户安装了 ``jieba`` , 将使用 ``jieba`` 对字符串进行
分词处理。可以通过传入列表的方式禁用这种行为。
也可以使用自己喜爱的分词模块对字符串进行分词处理,
只需将经过分词处理的字符串列表传进来就可以了。
:type hans: unicode 字符串或字符串列表
:param style: 指定拼音风格
:param errors: 指定如何处理没有拼音的字符
* ``'default'``: 保留原始字符
* ``'ignore'``: 忽略该字符
* ``'replace'``: 替换为去掉 ``\\u`` 的 unicode 编码字符串
(``u'\\u90aa'`` => ``u'90aa'``)
* callable 对象: 回调函数之类的可调用对象。如果 ``erros``
参数 的值是个可调用对象,那么程序会回调这个函数:
``func(char)``::
def foobar(char):
return 'a'
pinyin(u'あ', errors=foobar)
:param heteronym: 是否启用多音字
:return: 拼音列表
:rtype: list
Usage::
>>> from pypinyin import pinyin
>>> import pypinyin
>>> pinyin(u'中心')
[[u'zh\u014dng'], [u'x\u012bn']]
>>> pinyin(u'中心', heteronym=True) # 启用多音字模式
[[u'zh\u014dng', u'zh\xf2ng'], [u'x\u012bn']]
>>> pinyin(u'中心', style=pypinyin.INITIALS) # 设置拼音风格
[[u'zh'], [u'x']]
>>> pinyin(u'中心', style=pypinyin.TONE2)
[[u'zho1ng'], [u'xi1n']]
"""
    # segment the string into words
if isinstance(hans, unicode):
hans = seg(hans)
pys = []
for words in hans:
pys.extend(_pinyin(words, style, heteronym, errors))
return pys
def slug(hans, style=NORMAL, heteronym=False, separator='-', errors='default'):
"""生成 slug 字符串.
:param hans: 汉字
:type hans: unicode or list
:param style: 指定拼音风格
:param heteronym: 是否启用多音字
:param separstor: 两个拼音间的分隔符/连接符
:param errors: 指定如何处理没有拼音的字符,详情请参考
:py:func:`~pypinyin.pinyin`
:return: slug 字符串.
::
>>> import pypinyin
>>> pypinyin.slug(u'中国人')
u'zhong-guo-ren'
>>> pypinyin.slug(u'中国人', separator=u' ')
u'zhong guo ren'
>>> pypinyin.slug(u'中国人', style=pypinyin.INITIALS)
u'zh-g-r'
"""
return separator.join(chain(*pinyin(hans, style=style, heteronym=heteronym,
errors=errors)
))
def lazy_pinyin(hans, style=NORMAL, errors='default'):
"""不包含多音字的拼音列表.
与 :py:func:`~pypinyin.pinyin` 的区别是返回的拼音是个字符串,
并且每个字只包含一个读音.
:param hans: 汉字
:type hans: unicode or list
:param style: 指定拼音风格
:param errors: 指定如何处理没有拼音的字符,详情请参考
:py:func:`~pypinyin.pinyin`
:return: 拼音列表(e.g. ``['zhong', 'guo', 'ren']``)
:rtype: list
Usage::
>>> from pypinyin import lazy_pinyin
>>> import pypinyin
>>> lazy_pinyin(u'中心')
[u'zhong', u'xin']
>>> lazy_pinyin(u'中心', style=pypinyin.TONE)
[u'zh\u014dng', u'x\u012bn']
>>> lazy_pinyin(u'中心', style=pypinyin.INITIALS)
[u'zh', u'x']
>>> lazy_pinyin(u'中心', style=pypinyin.TONE2)
[u'zho1ng', u'xi1n']
"""
return list(chain(*pinyin(hans, style=style, heteronym=False,
errors=errors)))
| mit | -516,029,625,758,082,300 | 24.881319 | 79 | 0.549762 | false |
markrobinson85/odoo-pos-promotions | pos_promotions/promotion_model.py | 1 | 2597 | # -*- coding: utf-8 -*-
from openerp import models, fields, api
from openerp.osv import osv
class Promotion(models.Model):
#_inherit = "account.analytic.account"
_name = "pos.promotion"
_description = "Promotional Rule"
_inherit = "mail.thread"
name = fields.Char("Promotion Name", required=True)
notes = fields.Text()
locations = fields.Many2many("pos.config", string="Locations", track_visibility="onchange")
active = fields.Boolean("Active", track_visibility="onchange")
coupon_code = fields.Char("Coupon Code", required=False, track_visibility="onchange")
date_start = fields.Date("Start Date", default=fields.Date.today, track_visibility="onchange")
date_end = fields.Date("End Date", track_visibility="onchange")
priority = fields.Integer("Priority", track_visibility="onchange")
discount_type = fields.Selection([('to_percent', 'Percentage'), ('bogo_cheapest', 'The Cheapest'), ('fixed_discount', 'Fixed Price Discount')])
max_qty = fields.Integer("Maximum Quantity", track_visibility="onchange")
discount_step = fields.Integer("Discount Step", track_visibility="onchange")
discount_amount = fields.Float("Discount Amount", track_visibility="onchange")
stop_processing = fields.Boolean("Stop processing rules after this rule.", track_visibility="onchange")
categories_applied = fields.Many2many("pos.category", "promotion_category_applied", string="Categories Applied", track_visibility="onchange")
categories_excluded = fields.Many2many("pos.category", "promotion_category_excluded", string="Categories Excluded", track_visibility="onchange")
products_applied = fields.Many2many("product.template", "promotion_product_applied", string="Products Applied", track_visibility="onchange")
products_excluded = fields.Many2many("product.template", "promotion_product_excluded", string="Products Excluded", track_visibility="onchange")
label = fields.Char("Label (How discount appears on receipt)", track_visiblity="onchange", track_visibility="onchange")
class Discount(osv.osv):
_name = "pos.promotion.discount"
_description = "Discount applied to order line."
name = fields.Char("Discount Name")
rule = fields.Many2many('pos.promotion', string="Promotions Applied")
discount_applied = fields.Float("Discount Amount")
#class PromotionOrderLine(osv.osv):
# _inherit = 'pos.order.line'
# discount = fields.One2one("pos.promotion.discount")
#rule_ids = fields.Char('Rules applied', default="")
#rules_applied = fields.Many2many('pos.promotion', string="Promotions Applied") | mit | -5,878,874,552,577,806,000 | 63.95 | 148 | 0.725838 | false |
tkzeng/molecular-design-toolkit | moldesign/min/descent.py | 1 | 5697 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import moldesign as mdt
from moldesign import utils
from moldesign import units as u
from .base import MinimizerBase
from . import toplevel
def exports(o):
__all__.append(o.__name__)
return o
__all__ = []
@exports
class GradientDescent(MinimizerBase):
""" A careful (perhaps overly careful) gradient descent implementation designed to relax
structures far from equilibrium.
A backtracking line search is performed along the steepest gradient direction.
The maximum move for any single atom is also limited by ``max_atom_move``
Note:
This algorithm is good at stably removing large forces, but it's very poorly suited to
locating any type of critical point; don't use this to find a minimum!
References:
https://www.math.washington.edu/~burke/crs/408/lectures/L7-line-search.pdf
Args:
mol (moldesign.Molecule): molecule to minimize
max_atom_move (Scalar[length]): maximum displacement of a single atom
scaling (Scalar[length/force]): unit of displacement per unit force
gamma (float): number between 0 and 1 indicating scale factor for backtracking search
control (float): threshold for terminating line search; this is a proportion
(0<=``control``<=1) of the expected function decrease
**kwargs (dict): kwargs from :class:`MinimizerBase`
"""
_strip_units = False
def __init__(self, mol,
max_atom_move=0.05*u.angstrom,
scaling=0.01*u.angstrom**2/u.eV,
gamma=0.4, control=0.25,
**kwargs):
super(GradientDescent, self).__init__(mol, **kwargs)
assert 'forces' in self.request_list, 'Gradient descent built-in gradients'
self.max_atom_move = max_atom_move
self.scaling = scaling
self.gamma = gamma
self.control = control
self._last_energy = None
def run(self):
print 'Starting geometry optimization: built-in gradient descent'
lastenergy = self.objective(self._coords_to_vector(self.mol.positions))
current = self._coords_to_vector(self.mol.positions)
for i in xrange(self.nsteps):
grad = self.grad(current)
if np.abs(grad.max()) < self.force_tolerance: # converged
return
move = self.scale_move(grad)
armijo_goldstein_prefac = self.control * move.norm()
for icycle in xrange(0, 10):
g = self.gamma**icycle
newpos = self._make_move(current, g * move)
# move direction may be different than gradient direction due to constraints
move_vec = (newpos-current).normalized()
if grad.dot(move_vec) >= 0.0: # move flipped direction!
if self._constraint_convergence(newpos, current, grad):
return # flip was because we're converged
else: # flip was because move was too big
newenergy = np.inf * u.default.energy
continue
try:
newenergy = self.objective(newpos)
except mdt.QMConvergenceError:
continue
if newenergy <= lastenergy + g * armijo_goldstein_prefac * grad.dot(move_vec):
break
else:
if newenergy >= lastenergy:
raise mdt.ConvergenceFailure('Line search failed')
if self._constraint_convergence(newpos, current, grad):
return
else:
current = newpos
lastenergy = newenergy
self._sync_positions(current)
self.callback()
def scale_move(self, grad):
move = -self.scaling*grad
mmax = np.abs(move).max()
if mmax > self.max_atom_move: # rescale the move
move *= self.max_atom_move/mmax
return move
def _make_move(self, current, move):
if self.mol.constraints:
# TODO: get constraint forces from lagrange multipliers and use them to check for convergence
self._sync_positions(current)
prev = self.mol.positions.copy()
self._sync_positions(current+move)
mdt.geom.shake_positions(self.mol, prev)
return self._coords_to_vector(self.mol.positions)
else:
return current + move
def _constraint_convergence(self, pos, lastpos, energygrad):
""" Test for force-based convergence after projecting out constraint forces
Until the shake method starts explicitly storing constraint forces, we calculate this
direction as the SHAKE-adjusted displacement vector from the current descent step
"""
direction = mdt.mathutils.normalized((pos - lastpos).flatten())
proj_grad = energygrad.dot(direction)
return abs(proj_grad) < self.force_tolerance
gradient_descent = GradientDescent._as_function('gradient_descent')
exports(gradient_descent)
toplevel(gradient_descent)
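# Illustrative usage (added; assumes a prepared moldesign Molecule with an
# energy model attached, here called ``mol``. The keyword name and the returned
# trajectory are assumptions based on MinimizerBase, not verified here):
#
#     traj = gradient_descent(mol, nsteps=100)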
| apache-2.0 | -3,904,053,056,946,175,500 | 37.234899 | 105 | 0.626119 | false |
NASLab/GroundROS | src/experimental_results/tracking_log_analysis.py | 1 | 1388 | # python experimental tests for Husky
import numpy as np
import glob
import matplotlib.pyplot as plt
for file in glob.glob("*.npy"):
data = np.load(file)[5:, :]
print file,
error_long = data[:, 0]
error_lat = data[:, 1]
ref_x = [value-data[0,2] for value in data[:, 2]]
# print ref_x[:30]
ref_y = [value-data[0,3] for value in data[:, 3]]
pos_x = [value-data[0,4] for value in data[:, 4]][0::20]
pos_y = [value-data[0,5] for value in data[:, 5]][0::20]
pos_theta = data[:, 6]
time = data[:, 7] - data[0, 7]
vel = data[:,8]
# plt.plot(ref_x, ref_y, 'ro')
# plt.gca().set_aspect('equal', adjustable='box')
f0 = plt.figure()
f1 = plt.figure()
f2 = plt.figure()
ax0 = f0.add_subplot(111)
ax0.plot(ref_x, ref_y, '--', lw=3,label='Reference Trajectory')
ax0.plot(pos_x[0], pos_y[0], 'mo',markersize = 10,label='Start Point')
ax0.plot(pos_x, pos_y, 'ro',label='Robot Trajectory')
ax0.legend()
ax0.axis('equal')
ax1 = f1.add_subplot(111)
ax1.plot(time, error_long,lw=3,label='Longitudinal Error')
ax1.plot(time, error_lat,lw=3,label='Lateral Error')
ax1.legend()
ax2 = f2.add_subplot(111)
ax2.plot(time,vel,lw=3,label='Velocity Profile')
ax2.legend()
plt.draw()
plt.pause(.1) # <-------
raw_input("<Hit Enter To Close>")
plt.close(f0)
plt.close(f1)
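    # Column layout assumed by the indexing above (added as documentation):
    # 0: longitudinal error, 1: lateral error, 2-3: reference x/y, 4-5: robot x/y,
    # 6: heading (theta), 7: timestamp, 8: velocity.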
| mit | 3,869,541,272,850,976,300 | 29.173913 | 74 | 0.582133 | false |
jeffmahoney/crash-python | crash/addrxlat.py | 1 | 2632 | # -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
import addrxlat
import crash
from crash.cache.syscache import utsname
from crash.util import offsetof
from crash.util.symbols import Types
import gdb
types = Types(['uint32_t *', 'uint64_t *'])
class TranslationContext(addrxlat.Context):
def __init__(self, *args: int, **kwargs: int) -> None:
super().__init__(*args, **kwargs)
self.read_caps = addrxlat.CAPS(addrxlat.KVADDR)
def cb_sym(self, symtype: int, *args: str) -> int:
if symtype == addrxlat.SYM_VALUE:
ms = gdb.lookup_minimal_symbol(args[0])
if ms is not None:
return int(ms.value().address)
elif symtype == addrxlat.SYM_SIZEOF:
sym = gdb.lookup_symbol(args[0], None)[0]
if sym is not None:
return sym.type.sizeof
elif symtype == addrxlat.SYM_OFFSETOF:
sym = gdb.lookup_symbol(args[0], None, gdb.SYMBOL_STRUCT_DOMAIN)[0]
if sym is None:
# this works for typedefs:
sym = gdb.lookup_symbol(args[0], None)[0]
if sym is not None:
                ret = offsetof(sym.type, args[1], True)
                if ret is None:
                    raise RuntimeError("offsetof can't return None with errors=True")
                return ret  # return the computed member offset to the caller
return super().cb_sym(symtype, *args)
def cb_read32(self, faddr: addrxlat.FullAddress) -> int:
v = gdb.Value(faddr.addr).cast(types.uint32_t_p_type)
return int(v.dereference())
def cb_read64(self, faddr: addrxlat.FullAddress) -> int:
v = gdb.Value(faddr.addr).cast(types.uint64_t_p_type)
return int(v.dereference())
class CrashAddressTranslation:
def __init__(self) -> None:
try:
target = crash.current_target()
self.context = target.kdump.get_addrxlat_ctx()
self.system = target.kdump.get_addrxlat_sys()
except AttributeError:
self.context = TranslationContext()
self.system = addrxlat.System()
self.system.os_init(self.context,
arch=utsname.machine,
type=addrxlat.OS_LINUX)
self.is_non_auto = False
xlatmap = self.system.get_map(addrxlat.SYS_MAP_MACHPHYS_KPHYS)
for addr_range in xlatmap:
if addr_range.meth == addrxlat.SYS_METH_NONE:
continue
meth = self.system.get_meth(addr_range.meth)
if meth.kind != addrxlat.LINEAR or meth.off != 0:
self.is_non_auto = True
break
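# Illustrative usage (added; assumes a crash-python session with a dump loaded):
#
#     xlat = CrashAddressTranslation()
#     if xlat.is_non_auto:
#         print("machine-to-kernel physical translation is not an identity mapping")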
| gpl-2.0 | -4,112,297,611,290,940,000 | 37.144928 | 85 | 0.577508 | false |
ReactiveX/RxPY | tests/test_observable/test_maxby.py | 1 | 11240 | import unittest
from rx import operators as ops
from rx.testing import TestScheduler, ReactiveTest
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestMaxBy(unittest.TestCase):
def test_maxby_empty(self):
scheduler = TestScheduler()
msgs = [
on_next(150, {"key": 1, "value": 'z'}),
on_completed(250)
]
xs = scheduler.create_hot_observable(msgs)
def create():
def mapper(x):
return x["key"]
return xs.pipe(ops.max_by(mapper))
res = scheduler.start(create=create).messages
self.assertEqual(2, len(res))
self.assertEqual(0, len(res[0].value.value))
assert(res[1].value.kind == 'C' and res[1].time == 250)
def test_maxby_return(self):
scheduler = TestScheduler()
msgs = [
on_next(150, {
"key": 1,
"value": 'z'
}), on_next(210, {
"key": 2,
"value": 'a'
}), on_completed(250)
]
xs = scheduler.create_hot_observable(msgs)
def create():
def mapper(x):
return x["key"]
return xs.pipe(ops.max_by(mapper))
res = scheduler.start(create=create).messages
self.assertEqual(2, len(res))
assert(res[0].value.kind == 'N')
self.assertEqual(1, len(res[0].value.value))
self.assertEqual(2, res[0].value.value[0]["key"])
self.assertEqual('a', res[0].value.value[0]["value"])
assert(res[1].value.kind == 'C' and res[1].time == 250)
def test_maxby_some(self):
scheduler = TestScheduler()
msgs = [
on_next(150, {
"key": 1,
"value": 'z'
}), on_next(210, {
"key": 3,
"value": 'b'
}), on_next(220, {
"key": 4,
"value": 'c'
}), on_next(230, {
"key": 2,
"value": 'a'
}), on_completed(250)
]
xs = scheduler.create_hot_observable(msgs)
def create():
def mapper(x):
return x["key"]
return xs.pipe(ops.max_by(mapper))
res = scheduler.start(create=create).messages
self.assertEqual(2, len(res))
assert(res[0].value.kind == 'N')
self.assertEqual(1, len(res[0].value.value[0]["value"]))
self.assertEqual(4, res[0].value.value[0]["key"])
self.assertEqual('c', res[0].value.value[0]["value"])
assert(res[1].value.kind == 'C' and res[1].time == 250)
def test_maxby_multiple(self):
scheduler = TestScheduler()
msgs = [
on_next(150, {
"key": 1,
"value": 'z'
}),
on_next(210, {
"key": 3,
"value": 'b'
}),
on_next(215, {
"key": 2,
"value": 'd'
}),
on_next(220, {
"key": 3,
"value": 'c'
}),
on_next(225, {
"key": 2,
"value": 'y'
}),
on_next(230, {
"key": 4,
"value": 'a'
}),
on_next(235, {
"key": 4,
"value": 'r'
}),
on_completed(250)
]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max_by(lambda x: x["key"]))
res = scheduler.start(create=create).messages
self.assertEqual(2, len(res))
assert(res[0].value.kind == 'N')
self.assertEqual(2, len(res[0].value.value))
self.assertEqual(4, res[0].value.value[0]["key"])
self.assertEqual('a', res[0].value.value[0]["value"])
self.assertEqual(4, res[0].value.value[1]["key"])
self.assertEqual('r', res[0].value.value[1]["value"])
assert(res[1].value.kind == 'C' and res[1].time == 250)
def test_maxby_on_error(self):
ex = 'ex'
scheduler = TestScheduler()
msgs = [
on_next(150, {
"key": 1,
"value": 'z'
}),
on_error(210, ex)
]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max_by(lambda x: x["key"]))
res = scheduler.start(create=create).messages
assert res == [on_error(210, ex)]
def test_maxby_never(self):
scheduler = TestScheduler()
msgs = [
on_next(150, {
"key": 1,
"value": 'z'
})
]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max_by(lambda x: x["key"]))
res = scheduler.start(create=create).messages
assert res == []
# def test_MaxBy_Comparer_Empty():
# var msgs, res, reverseComparer, scheduler, xs
# scheduler = TestScheduler()
# msgs = [
# on_next(150, {
# key: 1,
# value: 'z'
# }),
# on_completed(250)
# ]
# reverseComparer = function (a, b) {
# if (a > b) {
# return -1
# }
# if (a < b) {
# return 1
# }
# return 0
# }
# xs = scheduler.create_hot_observable(msgs)
# res = scheduler.start(create=create)
# return xs.max_by(function (x) {
# return x.key
# }, reverseComparer)
# }).messages
# self.assertEqual(2, res.length)
# self.assertEqual(0, res[0].value.value.length)
# assert(res[1].value.kind == 'C' and res[1].time == 250)
# def test_MaxBy_Comparer_Return():
# var msgs, res, reverseComparer, scheduler, xs
# scheduler = TestScheduler()
# msgs = [
# on_next(150, {
# key: 1,
# value: 'z'
# }), on_next(210, {
# key: 2,
# value: 'a'
# }), on_completed(250)
# ]
# reverseComparer = function (a, b) {
# if (a > b) {
# return -1
# }
# if (a < b) {
# return 1
# }
# return 0
# }
# xs = scheduler.create_hot_observable(msgs)
# res = scheduler.start(create=create)
# return xs.max_by(function (x) {
# return x.key
# }, reverseComparer)
# }).messages
# self.assertEqual(2, res.length)
# assert(res[0].value.kind == 'N')
# self.assertEqual(1, res[0].value.value.length)
# self.assertEqual(2, res[0].value.value[0].key)
# self.assertEqual('a', res[0].value.value[0].value)
# assert(res[1].value.kind == 'C' and res[1].time == 250)
# def test_MaxBy_Comparer_Some():
# var msgs, res, reverseComparer, scheduler, xs
# scheduler = TestScheduler()
# msgs = [
# on_next(150, {
# key: 1,
# value: 'z'
# }), on_next(210, {
# key: 3,
# value: 'b'
# }), on_next(220, {
# key: 4,
# value: 'c'
# }), on_next(230, {
# key: 2,
# value: 'a'
# }), on_completed(250)
# ]
# reverseComparer = function (a, b) {
# if (a > b) {
# return -1
# }
# if (a < b) {
# return 1
# }
# return 0
# }
# xs = scheduler.create_hot_observable(msgs)
# res = scheduler.start(create=create)
# return xs.max_by(function (x) {
# return x.key
# }, reverseComparer)
# }).messages
# self.assertEqual(2, res.length)
# assert(res[0].value.kind == 'N')
# self.assertEqual(1, res[0].value.value.length)
# equal(2, res[0].value.value[0].key)
# self.assertEqual('a', res[0].value.value[0].value)
# assert(res[1].value.kind == 'C' and res[1].time == 250)
# def test_MaxBy_Comparer_Throw():
# var ex, msgs, res, reverseComparer, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# msgs = [
# on_next(150, {
# key: 1,
# value: 'z'
# }), on_error(210, ex)
# ]
# reverseComparer = function (a, b) {
# if (a > b) {
# return -1
# }
# if (a < b) {
# return 1
# }
# return 0
# }
# xs = scheduler.create_hot_observable(msgs)
# res = scheduler.start(create=create)
# return xs.max_by(function (x) {
# return x.key
# }, reverseComparer)
# }).messages
# assert res == [on_error(210, ex)]
# def test_MaxBy_Comparer_Never():
# var msgs, res, reverseComparer, scheduler, xs
# scheduler = TestScheduler()
# msgs = [
# on_next(150, {
# key: 1,
# value: 'z'
# })
# ]
# reverseComparer = function (a, b) {
# if (a > b) {
# return -1
# }
# if (a < b) {
# return 1
# }
# return 0
# }
# xs = scheduler.create_hot_observable(msgs)
# res = scheduler.start(create=create)
# return xs.max_by(function (x) {
# return x.key
# }, reverseComparer)
# }).messages
# assert res == []
# def test_MaxBy_SelectorThrows():
# var ex, msgs, res, reverseComparer, scheduler, xs
# ex = 'ex'
# scheduler = TestScheduler()
# msgs = [
# on_next(150, {
# key: 1,
# value: 'z'
# }), on_next(210, {
# key: 3,
# value: 'b'
# }), on_next(220, {
# key: 2,
# value: 'c'
# }), on_next(230, {
# key: 4,
# value: 'a'
# }), on_completed(250)
# ]
# reverseComparer = function (a, b) {
# if (a > b) {
# return -1
# }
# if (a < b) {
# return 1
# }
# return 0
# }
# xs = scheduler.create_hot_observable(msgs)
# res = scheduler.start(create=create)
# return xs.max_by(function (x) {
# throw ex
# }, reverseComparer)
# }).messages
# assert res == [on_error(210, ex)]
def test_maxby_comparerthrows(self):
ex = 'ex'
scheduler = TestScheduler()
msgs = [
on_next(150, {
"key": 1,
"value": 'z'
}), on_next(210, {
"key": 3,
"value": 'b'
}), on_next(220, {
"key": 2,
"value": 'c'
}), on_next(230, {
"key": 4,
"value": 'a'
}), on_completed(250)
]
def reverse_comparer(a, b):
raise Exception(ex)
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max_by(lambda x: x["key"], reverse_comparer))
res = scheduler.start(create=create).messages
assert res == [on_error(220, ex)]
| mit | 3,801,385,841,253,108,000 | 27.241206 | 76 | 0.457117 | false |
camayak/django-apiserver | tests/core/tests/paginator.py | 1 | 5585 | from django.conf import settings
from django.test import TestCase
from apiserver.exceptions import BadRequest
from apiserver.paginator import Paginator
from core.models import Note
from core.tests.resources import NoteResource
from django.db import reset_queries
class PaginatorTestCase(TestCase):
fixtures = ['note_testdata.json']
def setUp(self):
super(PaginatorTestCase, self).setUp()
self.data_set = Note.objects.all()
self.old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = self.old_debug
super(PaginatorTestCase, self).tearDown()
def _get_query_count(self):
try:
from django.db import connections
return connections['default'].queries
except ImportError:
from django.db import connection
return connection.queries
def test_page1(self):
reset_queries()
self.assertEqual(len(self._get_query_count()), 0)
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
# REGRESSION: Check to make sure only part of the cache is full.
# We used to run ``len()`` on the ``QuerySet``, which would populate
# the entire result set. Owwie.
paginator.get_count()
self.assertEqual(len(self._get_query_count()), 1)
# Should be nothing in the cache.
self.assertEqual(paginator.objects._result_cache, None)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], '/api/v1/notes/?limit=2&offset=2')
self.assertEqual(meta['total_count'], 6)
def test_page2(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertEqual(meta['previous'], '/api/v1/notes/?limit=2&offset=0')
self.assertEqual(meta['next'], '/api/v1/notes/?limit=2&offset=4')
self.assertEqual(meta['total_count'], 6)
def test_page3(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=4)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 4)
self.assertEqual(meta['previous'], '/api/v1/notes/?limit=2&offset=2')
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_large_limit(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=20, offset=0)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 20)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_all(self):
paginator = Paginator({'limit': 0}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
page = paginator.page()
meta = page['meta']
self.assertEqual(meta['limit'], 0)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['total_count'], 6)
self.assertEqual(len(page['objects']), 6)
self.assertFalse('previous' in meta)
self.assertFalse('next' in meta)
def test_complex_get(self):
request = {
'slug__startswith': 'food',
'format': 'json',
}
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertEqual(meta['previous'], '/api/v1/notes/?slug__startswith=food&offset=0&limit=2&format=json')
self.assertEqual(meta['next'], '/api/v1/notes/?slug__startswith=food&offset=4&limit=2&format=json')
self.assertEqual(meta['total_count'], 6)
def test_limit(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.limit = '10'
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = None
self.assertEqual(paginator.get_limit(), 20)
paginator.limit = 10
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = -10
self.assertRaises(BadRequest, paginator.get_limit)
paginator.limit = 'hAI!'
self.assertRaises(BadRequest, paginator.get_limit)
def test_offset(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.offset = '10'
self.assertEqual(paginator.get_offset(), 10)
paginator.offset = 0
self.assertEqual(paginator.get_offset(), 0)
paginator.offset = 10
self.assertEqual(paginator.get_offset(), 10)
        paginator.offset = -10
self.assertRaises(BadRequest, paginator.get_offset)
paginator.offset = 'hAI!'
self.assertRaises(BadRequest, paginator.get_offset)
def test_regression_nonqueryset(self):
paginator = Paginator({}, ['foo', 'bar', 'baz'], limit=2, offset=0)
# This would fail due to ``count`` being present on ``list`` but called
# differently.
page = paginator.page()
self.assertEqual(page['objects'], ['foo', 'bar'])
| bsd-2-clause | -2,993,944,627,323,012,600 | 37.784722 | 111 | 0.617547 | false |
caedesvvv/pynoded | pynoded/shapes/arrow.py | 1 | 1616 | """
Arrow shapes
"""
from pynoded.graph import GraphObject
from math import atan2,pi
from cubicspline import cubicspline
from numpy import array
class Arrow(GraphObject):
"""
An arrow connecting two objects.
"""
    def __init__(self, parent, x0, y0, x1, y1, color):
        GraphObject.__init__(self, parent, x0, y0)
        self.x1 = x1
        self.y1 = y1
        self.color = color
        self.maxdist = 3
def Draw_(self, ctx):
        x1, y1 = self.ToLocal(self.x1, self.y1)
        ctx.set_line_width(1)
        linewidth, _ = ctx.device_to_user_distance(1., 1.)
ctx.set_line_width(linewidth)
ctx.set_source_rgb(*self.color)
ctx.move_to(0,0)
dist = abs(complex(x1, y1))
elast = dist/2.0
ctx.curve_to(elast, 0, x1-elast, y1, x1, y1)
ctx.stroke()
data = [[float(elast), float(0)],
[float(x1-elast), float(y1)],
[float(x1), float(y1)],
[0, 0]]
data = array(data)
time, val = cubicspline(data, 123)
if linewidth > self.maxdist:
return
ctx.move_to(x1, y1)
# following is to draw the arrow in direction of line
# but now we're drawing the in/out tangential, so not needed
# angle=atan2(0,x1)
# ctx.rotate(angle)
ctx.rel_line_to(-6*linewidth,0)
ctx.rel_line_to(0,2*linewidth)
ctx.rel_line_to(6*linewidth,-2*linewidth)
ctx.rel_line_to(-6*linewidth,-2*linewidth)
ctx.rel_line_to(0,2*linewidth)
ctx.fill_preserve()
ctx.stroke()
def Test(self,x,y):
return False
| gpl-3.0 | -2,896,924,524,325,959,000 | 28.925926 | 68 | 0.564975 | false |
colloquium/spacewalk | client/tools/rhn-virtualization/virtualization/poller.py | 1 | 10725 | #
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
sys.path.append("/usr/share/rhn/")
import binascii
import traceback
try:
import libvirt
except ImportError:
# There might not be a libvirt.
libvirt = None
from optparse import OptionParser
from virtualization.state import State
from virtualization.errors import VirtualizationException
from virtualization.constants import PropertyType, \
VirtualizationType, \
IdentityType, \
VIRT_STATE_NAME_MAP, \
VIRT_VDSM_STATUS_MAP
from virtualization.notification import Plan, \
EventType, \
TargetType
from virtualization.util import hyphenize_uuid, \
is_fully_virt
from virtualization.poller_state_cache import PollerStateCache
from virtualization.domain_directory import DomainDirectory
###############################################################################
# Globals
###############################################################################
options = None
###############################################################################
# Public Interface
###############################################################################
def poll_hypervisor():
"""
This function polls the hypervisor for information about the currently
running set of domains. It returns a dictionary object that looks like the
following:
{ uuid : { 'name' : '...',
'uuid' : '...',
'virt_type' : '...',
'memory_size' : '...',
'vcpus' : '...',
'state' : '...' }, ... }
"""
if not libvirt:
return {}
try:
conn = libvirt.open(None)
except libvirt.libvirtError, lve:
# virConnectOpen() failed
conn = None
if not conn:
# No connection to hypervisor made
return {}
domainIDs = conn.listDomainsID()
state = {}
for domainID in domainIDs:
try:
domain = conn.lookupByID(domainID)
except libvirt.libvirtError, lve:
raise VirtualizationException, \
"Failed to obtain handle to domain %d: %s" % \
(domainID, repr(lve))
uuid = binascii.hexlify(domain.UUID())
# SEE: http://libvirt.org/html/libvirt-libvirt.html#virDomainInfo
# for more info.
domain_info = domain.info()
# Set the virtualization type. We can tell if the domain is fully virt
# by checking the domain's OSType() attribute.
virt_type = VirtualizationType.PARA
if is_fully_virt(domain):
virt_type = VirtualizationType.FULLY
# we need to filter out the small per/minute KB changes
# that occur inside a vm. To do this we divide by 1024 to
# drop our precision down to megabytes with an int then
# back up to KB
        memory = int(domain_info[2] / 1024)
        memory = memory * 1024
properties = {
PropertyType.NAME : domain.name(),
PropertyType.UUID : uuid,
PropertyType.TYPE : virt_type,
PropertyType.MEMORY : str(memory), # current memory
PropertyType.VCPUS : domain_info[3],
PropertyType.STATE : VIRT_STATE_NAME_MAP[domain_info[0]] }
state[uuid] = properties
if state: _log_debug("Polled state: %s" % repr(state))
return state
def poll_through_vdsm():
"""
This method polls all the virt guests running on a VDSM enabled Host.
Libvirt is disabled by default on RHEV-M managed clients.
* Imports the localvdsm client that talks to the localhost
and fetches the list of vms and their info.
* Extract the data and construct the state to pass it to the
execution plan for guest polling.
* The server should account for business rules similar to
xen/kvm.
"""
import localvdsm
try:
server = localvdsm.connect()
except:
# VDSM raised an exception we're done here
return {}
# Extract list of vm's. True returns full list
try:
domains = server.list(True)
except:
# Something went wrong in vdsm, exit
return {}
if not len(domains['vmList']):
# No domains, exit.
return
state = {}
for domain in domains['vmList']:
#trim uuid
uuid = domain['vmId'].lower().replace('-', '')
# Map the VDSM status to libvirt for server compatibility
status = 'nostate'
if VIRT_VDSM_STATUS_MAP.has_key(domain['status']):
status = VIRT_VDSM_STATUS_MAP[domain['status']]
# This is gonna be fully virt as its managed by VDSM
virt_type = VirtualizationType.FULLY
#Memory
memory = int(domain['memSize']) * 1024
# vcpus
if domain.has_key('smp'):
vcpus = domain['smp']
else:
vcpus = '1'
properties = {
PropertyType.NAME : domain['vmName'],
PropertyType.UUID : uuid,
PropertyType.TYPE : virt_type,
PropertyType.MEMORY : memory, # current memory
PropertyType.VCPUS : vcpus,
PropertyType.STATE : status}
state[uuid] = properties
if state: _log_debug("Polled state: %s" % repr(state))
return state
def poll_state(uuid):
"""
Polls just the state of the guest with the provided UUID. This state is
returned.
"""
conn = libvirt.open(None)
if not conn:
raise VirtualizationException, \
"Failed to open connection to hypervisor."
# Attempt to connect to the domain. Since there is technically no
# "stopped" state, we will assume that if we cannot connect the domain is
# not running. Unfortunately, we can't really determine if the domain
# actually exists.
domain = None
try:
domain = conn.lookupByUUIDString(hyphenize_uuid(uuid))
except libvirt.libvirtError, lve:
# Can't find domain. Return stopped state.
return State(None)
# Now that we have the domain, lookup the state.
domain_info = domain.info()
return State(VIRT_STATE_NAME_MAP[domain_info[0]])
###############################################################################
# Helper Functions
###############################################################################
def _send_notifications(poller_state):
"""
This function will send notifications based on vm state change to the
server. To reduce the possibility of spamming the server but still
maintain an element of consistency, it will compare the previous poll state
against the current poll state and only send notifications if something has
changed. In the event that the cache might have gotten into an
inconsistent state, the cache will be removed after every 50 polls (this is
about every 1.5 hours). This will cause the full state to be re-uploaded
and put things back in sync, if necessary.
"""
# Now, if anything changed, send the appropriate notification for it.
if poller_state.is_changed():
added = poller_state.get_added()
removed = poller_state.get_removed()
modified = poller_state.get_modified()
plan = Plan()
# Declare virtualization host first
plan.add(EventType.EXISTS,
TargetType.SYSTEM,
{ PropertyType.IDENTITY : IdentityType.HOST,
PropertyType.UUID : '0000000000000000' })
for (uuid, data) in added.items():
plan.add(EventType.EXISTS, TargetType.DOMAIN, data)
for (uuid, data) in modified.items():
plan.add(EventType.EXISTS, TargetType.DOMAIN, data)
for (uuid, data) in removed.items():
plan.add(EventType.REMOVED, TargetType.DOMAIN, data)
plan.execute()
def _parse_options():
usage = "Usage: %prog [options]"
parser = OptionParser(usage)
parser.set_defaults(debug=False)
parser.add_option("-d", "--debug", action="store_true", dest="debug")
global options
(options, args) = parser.parse_args()
def _log_debug(msg, include_trace = 0):
if options and options.debug:
print "DEBUG: " + str(msg)
if include_trace:
e_info = sys.exc_info()
traceback.print_exception(e_info[0], e_info[1], e_info[2])
###############################################################################
# Main Program
###############################################################################
if __name__ == "__main__":
# First, handle the options.
_parse_options()
# check for VDSM status
import commands
vdsm_enabled = False
status, msg = commands.getstatusoutput("/etc/init.d/vdsmd status")
if status == 0:
vdsm_enabled = True
# Crawl each of the domains on this host and obtain the new state.
if vdsm_enabled:
domain_list = poll_through_vdsm()
elif libvirt:
domain_list = poll_hypervisor()
else:
# If no libvirt nor vdsm is present, this program is pretty much
# useless. Just exit.
sys.exit(0)
if not domain_list:
# No domains returned, nothing to do, exit polling
sys.exit(0)
    # create the unknown domain config files (for libvirt only)
if libvirt and not vdsm_enabled:
uuid_list = domain_list.keys()
domain = DomainDirectory()
domain.save_unknown_domain_configs(uuid_list)
cached_state = PollerStateCache(domain_list,
debug = options and options.debug)
# Send notifications, if necessary.
_send_notifications(cached_state)
# Save the new state.
cached_state.save()
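# Note: this script polls once and exits; it is meant to be invoked
# periodically (for example from cron) so that _send_notifications() can
# upload only the state changes between consecutive runs.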
| gpl-2.0 | -3,612,708,035,236,073,000 | 33.596774 | 79 | 0.565315 | false |
forseti-security/forseti-security | google/cloud/forseti/scanner/audit/resource_rules_engine.py | 1 | 13538 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for Resources."""
from builtins import object
import collections
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.scanner.audit import base_rules_engine
from google.cloud.forseti.scanner.audit import errors
from google.cloud.forseti.services import utils
LOGGER = logger.get_logger(__name__)
_SUPPORTED_MODES = {'required'}
RuleViolation = collections.namedtuple(
'RuleViolation',
['resource_id', 'resource_name', 'resource_type', 'full_name', 'rule_index',
'rule_name', 'violation_type', 'violation_data', 'resource_data']
)
class ResourceRulesEngine(base_rules_engine.BaseRulesEngine):
"""Rules engine for Resources."""
def __init__(self, rules_file_path, snapshot_timestamp=None):
"""Initialize.
Args:
rules_file_path (str): file location of rules
snapshot_timestamp (str): snapshot timestamp. Defaults to None.
If set, this will be the snapshot timestamp
used in the engine.
"""
super(ResourceRulesEngine, self).__init__(
rules_file_path=rules_file_path)
self.rule_book = None
def build_rule_book(self, global_configs=None):
"""Build ResourceRuleBook from the rules definition file.
Args:
global_configs (dict): Global configurations.
"""
self.rule_book = ResourceRuleBook(self._load_rule_definitions())
def find_violations(self, resources, force_rebuild=False):
"""Determine whether the resources violate rules.
Args:
resources (List[Resource]): resources to find violations for.
force_rebuild (bool): If True, rebuilds the rule book. This will
reload the rules definition file and add the rules to the book.
Returns:
generator: A generator of rule violations.
"""
if self.rule_book is None or force_rebuild:
self.build_rule_book()
violations = self.rule_book.find_violations(resources)
return violations
def add_rules(self, rule_defs):
"""Add rules to the rule book.
Args:
rule_defs (dict): rule definitions dictionary
"""
if self.rule_book is not None:
self.rule_book.add_rules(rule_defs)
class ResourceRuleBook(base_rules_engine.BaseRuleBook):
"""The RuleBook for Resources."""
def __init__(self, rule_defs=None):
"""Initialization.
Args:
            rule_defs (dict): rule definitions dictionary.
"""
super(ResourceRuleBook, self).__init__()
self.rules = []
if not rule_defs:
self.rule_defs = {}
else:
self.rule_defs = rule_defs
self.add_rules(rule_defs)
def add_rules(self, rule_defs):
"""Add rules to the rule book.
Args:
rule_defs (dict): rule definitions dictionary.
"""
for (i, rule) in enumerate(rule_defs.get('rules', [])):
self.add_rule(rule, i)
def add_rule(self, rule_def, rule_index):
"""Add a rule to the rule book.
Args:
rule_def (dict): A dictionary containing rule definition
properties.
rule_index (int): The index of the rule from the rule definitions.
Assigned automatically when the rule book is built.
raises:
InvalidRulesSchemaError: if invalid rules definition.
"""
mode = rule_def.get('mode', '')
if mode not in _SUPPORTED_MODES:
raise errors.InvalidRulesSchemaError(
'Unexpected "mode" in rule {}: got {}, want one of {}'.format(
rule_index, mode, _SUPPORTED_MODES))
if not rule_def.get('resource_types'):
raise errors.InvalidRulesSchemaError(
'Missing non empty "resource_types" in rule {}'.format(
rule_index))
resource_tree = ResourceTree.from_json(
rule_def.get('resource_trees', []))
self.rules.append(
Rule(name=rule_def['name'],
index=rule_index,
resource_types=set(rule_def['resource_types']),
resource_tree=resource_tree))
def find_violations(self, resources):
"""Find resource violations in the rule book.
Args:
resources (List[Resource]): The resources to check for violations.
Yields:
RuleViolation: resource rule violations.
"""
for rule in self.rules:
for violation in rule.find_violations(resources):
yield violation
def get_applicable_resource_types(self):
"""Get the applicable resource types defined in this rule book.
        The applicable resource types are a union of all resource types defined
in each rule.
Returns:
Set[string]: applicable resource types.
"""
types = set()
for rule in self.rules:
types.update(rule.resource_types)
return types
class ResourceTree(object):
"""ResourceTree represents resources in a tree format."""
def __init__(self, resource_type=None, resource_id=None, children=None):
"""Initialize a resource tree.
Args:
resource_type (str): type of this resource. Leave as None if
this is a root node with multiple children. In that case,
this tree will represent a multi-root tree.
resource_id (str): id of this resource or '*'. Leave as None if
this is a root node with multiple children. In that case,
this tree will represent a multi-root tree.
children (List[ResourceTree]): children of this node.
"""
self.resource_type = resource_type
self.resource_id = resource_id
self.children = children or []
@classmethod
def from_json(cls, json_nodes):
"""Create a resource tree from the given JSON representation of nodes.
If there are multiple json nodes, the resulting tree will have a root
node with no resource type or id and each json node as a child.
If there is only one json node, the root will have the resource id and
type of the node.
Args:
json_nodes(List[dict]): JSON representation of nodes.
Returns:
ResourceTree: The resource tree representation of the json nodes.
"""
nodes = cls._from_json(json_nodes)
if len(nodes) == 1:
return nodes[0]
return ResourceTree(children=nodes)
@classmethod
def _from_json(cls, json_nodes):
"""Build Resource Tree nodes.
Args:
json_nodes(List[dict]): JSON representation of nodes.
Returns:
ResourceTree: The resource tree representation of the json nodes.
"""
nodes = []
for json_node in json_nodes:
node = ResourceTree(
resource_type=json_node['type'],
resource_id=json_node['resource_id'],
children=cls._from_json(json_node.get('children', [])))
nodes.append(node)
return nodes
def match(self, resource, resource_types):
"""Match the given resource against this resource tree.
Args:
resource (Resource): The resource to match.
resource_types (List[string]): Applicable resource types. Violations
on types not in this list will not be reported.
Returns:
ResourceTree: The final matching node, or None if there is no match.
"""
tuples = []
for resource_type, resource_id in (
utils.get_resources_from_full_name(resource.full_name)):
tuples.append((resource_type, resource_id))
# Tuples are returned in reverse order, so reverse them.
tuples = list(reversed(tuples))
        # Left-trim tuples that are not applicable.
for resource_type, _ in tuples:
if resource_type not in resource_types:
tuples = tuples[1:]
if not tuples:
return None
return self.match_tuples(tuples)
def match_tuples(self, tuples):
"""Match the given tuples against this tree.
Args:
tuples (List[Tuple[string, string]]): (type, id) pairs of resources.
Together, they represent one full resource.
e.g. organization/123/project/456/ should be represented as
[('organization', '123'), ('project', '456')].
Returns:
ResourceTree: The final matching node, or None if there is no match.
"""
if not self.resource_type:
return self._find_matching_child(tuples)
for resource_type, resource_id in tuples:
id_match = self.resource_id == '*' or (
resource_id == self.resource_id)
if resource_type == self.resource_type and id_match:
tuples = tuples[1:]
if not tuples:
return self
if not self.children:
return None
return self._find_matching_child(tuples)
return None
def _find_matching_child(self, tuples):
"""Finds a matching child node.
Assumes that a child will either match an exact resource id, or a
wildcard. The exact match child is given preference.
Args:
tuples (List[Tuple[string, string]]): (type, id) pairs of resources.
Together, they represent one full resource.
e.g. organization/123/project/456/ should be represented as
[('organization', '123'), ('project', '456')].
Returns:
ResourceTree: Matching child node, or None if none matched.
"""
wildcard_child = None
for child in self.children:
node = child.match_tuples(tuples)
if node:
if node.resource_id != '*':
return node
else:
wildcard_child = node
return wildcard_child
def get_nodes(self):
"""Get all nodes in this resource tree.
Returns:
List[ResourceTree]: nodes in this tree.
"""
nodes = []
if self.resource_type:
nodes.append(self)
for child in self.children:
nodes.extend(child.get_nodes())
return nodes
class Rule(object):
"""Rule properties from the rule definition file.
Also finds violations.
"""
def __init__(self, name, index, resource_types, resource_tree):
"""Initialize.
Args:
name (str): Name of the loaded rule.
index (int): The index of the rule from the rule definitions.
resource_types (List[str]): The applicable resource types of this
rule.
resource_tree (ResourceTree): Tree representing the valid resources.
"""
self.name = name
self.index = index
self.resource_types = resource_types
self.resource_tree = resource_tree
def find_violations(self, resources):
"""Find violations for this rule against the given resource.
Args:
resources (List[Resource]): resources to check for violations.
Yields:
RuleViolation: resource rule violation.
"""
matched_nodes = set()
for resource in resources:
if resource.type not in self.resource_types:
continue
node = self.resource_tree.match(resource, self.resource_types)
if node:
matched_nodes.add(node)
else:
yield RuleViolation(
resource_id=resource.id,
resource_name=resource.display_name,
resource_type=resource.type,
full_name=resource.full_name,
rule_index=self.index,
rule_name=self.name,
violation_type='RESOURCE_VIOLATION',
violation_data='',
resource_data=resource.data or '',
)
for node in self.resource_tree.get_nodes():
if node.resource_id != '*' and (
node not in matched_nodes):
yield RuleViolation(
resource_id=node.resource_id,
resource_name=node.resource_id,
resource_type=node.resource_type,
full_name=node.resource_id,
rule_index=self.index,
rule_name=self.name,
violation_type='RESOURCE_VIOLATION',
violation_data='',
resource_data='',
)
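# Minimal illustration (not part of the scanner; the ids are made up): build a
# ResourceTree from the same JSON layout used in rule files and match a
# resource expressed as (type, id) tuples.
if __name__ == '__main__':
    example_tree = ResourceTree.from_json([
        {'type': 'organization', 'resource_id': '123456',
         'children': [{'type': 'project', 'resource_id': '*'}]},
    ])
    matched = example_tree.match_tuples(
        [('organization', '123456'), ('project', 'my-project')])
    print(matched.resource_type if matched else None)  # prints 'project'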
| apache-2.0 | 7,402,139,771,712,971,000 | 33.891753 | 80 | 0.581844 | false |
fmalina/python-netsuite | netsuite/api/customer.py | 1 | 1591 |
from netsuite.client import client, passport, app_info
from netsuite.utils import (
get_record_by_type,
search_records_using
)
from netsuite.service import (
Customer,
CustomerSearchBasic,
SearchStringField
)
import uuid
def get_customer(internal_id):
return get_record_by_type('customer', internal_id)
def get_or_create_customer(customer_data):
"""
Lookup customer, add a customer if lookup fails.
"""
internal_id = lookup_customer_id_by_name_and_email(customer_data)
if not internal_id:
customer_data['entityId'] = str(uuid.uuid4())
customer = Customer(**customer_data)
response = client.service.add(customer, _soapheaders={
'passport': passport,
'applicationInfo': app_info
})
r = response.body.writeResponse
if r.status.isSuccess:
internal_id = r.baseRef.internalId
return get_customer(internal_id)
def lookup_customer_id_by_name_and_email(customer_data):
name_and_email = {k: v for k, v in customer_data.items()
if k in ['firstName', 'lastName', 'email']}
search_fields = {k: SearchStringField(searchValue=v, operator='is')
for k, v in name_and_email.items()}
customer_search = CustomerSearchBasic(**search_fields)
response = search_records_using(customer_search)
r = response.body.searchResult
if r.status.isSuccess:
if r.recordList is None:
return
records = r.recordList.record
if len(records) > 0:
return records[0].internalId
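# Illustrative usage (not part of the original module; field values are
# placeholders and a real call requires working NetSuite credentials behind
# netsuite.client):
#
#   customer = get_or_create_customer({
#       'firstName': 'Jane',
#       'lastName': 'Doe',
#       'email': '[email protected]',
#   })
#   print(customer.internalId)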
| bsd-3-clause | -684,644,612,951,325,200 | 29.596154 | 71 | 0.642992 | false |
algorhythms/LeetCode | 987 Vertical Order Traversal of a Binary Tree.py | 1 | 2279 | #!/usr/bin/python3
"""
Given a binary tree, return the vertical order traversal of its nodes' values.
For each node at position (X, Y), its left and right children respectively will
be at positions (X-1, Y-1) and (X+1, Y-1).
Running a vertical line from X = -infinity to X = +infinity, whenever the
vertical line touches some nodes, we report the values of the nodes in order
from top to bottom (decreasing Y coordinates).
If two nodes have the same position, then the value of the node that is
reported first is the value that is smaller.
Return an list of non-empty reports in order of X coordinate. Every report
will have a list of values of nodes.
Example 1:
Input: [3,9,20,null,null,15,7]
Output: [[9],[3,15],[20],[7]]
Explanation:
Without loss of generality, we can assume the root node is at position (0, 0):
Then, the node with value 9 occurs at position (-1, -1);
The nodes with values 3 and 15 occur at positions (0, 0) and (0, -2);
The node with value 20 occurs at position (1, -1);
The node with value 7 occurs at position (2, -2).
Example 2:
Input: [1,2,3,4,5,6,7]
Output: [[4],[2],[1,5,6],[3],[7]]
Explanation:
The node with value 5 and the node with value 6 have the same position according to the given scheme.
However, in the report "[1,5,6]", the node value of 5 comes first since 5 is smaller than 6.
Note:
The tree will have between 1 and 1000 nodes.
Each node's value will be between 0 and 1000.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from collections import defaultdict
from typing import List
class Solution:
def __init__(self):
self.mp = defaultdict(list) # element (-Y, val) # from left to right, top to bottom
def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
self.dfs(root, 0, 0)
ret = []
mn = min(self.mp)
mx = max(self.mp)
for i in range(mn, mx+1):
ret.append([
val
for _, val in sorted(self.mp[i])
])
return ret
def dfs(self, node, x, y):
if not node:
return
self.mp[x].append((-y, node.val))
self.dfs(node.left, x-1, y-1)
self.dfs(node.right, x+1, y-1)
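# Minimal self-check (not part of the LeetCode submission): rebuild the tree
# from Example 1, [3,9,20,null,null,15,7], and print the traversal.
if __name__ == '__main__':
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().verticalTraversal(root))  # [[9], [3, 15], [20], [7]]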
| mit | -3,850,868,018,202,838,500 | 28.986842 | 101 | 0.641948 | false |
UCMAndesLab/webCTRLsMAP | webctrl.py | 1 | 2580 | import os, subprocess
import json
from webctrlSOAP import webctrlSOAP
from smap import actuate, driver
from smap.authentication import authenticated
class _Actuator(actuate.SmapActuator):
GET_REQUEST = 'getValue'
SET_REQUEST = 'setValue'
def setup(self, opts):
self.devicePath = os.path.expanduser(opts['devicePath'])
self.serverAddr = opts['webctrlServerAddr'];
if 'scriptPath' in opts:
self.scriptPath = opts['scriptPath'];
else:
self.webctrl = webctrlSOAP();
    def webctrlRequest(self, typeOfRequest, inputValue="0"):
        cleanDevicePath = "\"{0}\"".format(self.devicePath)
        print cleanDevicePath
        # Use the external helper script when one was configured in setup();
        # otherwise fall back to the bundled SOAP client.
        if hasattr(self, 'scriptPath'):
            pythonCmd = [self.scriptPath, typeOfRequest, self.serverAddr, cleanDevicePath, inputValue]
            value = subprocess.check_output(pythonCmd)
        else:
            if typeOfRequest == self.SET_REQUEST:
                value = self.webctrl.setValue(self.serverAddr, self.devicePath, inputValue)
            else:
                value = self.webctrl.getValue(self.serverAddr, self.devicePath)
        return value
    def get_state(self, request):
        return float(self.webctrlRequest(self.GET_REQUEST))
    def set_state(self, request, state):
        status = self.webctrlRequest(self.SET_REQUEST, str(state))
        if status:
            return float(state)
class WebCTRL_Actuator(_Actuator, actuate.IntegerActuator):
def setup(self, opts):
actuate.IntegerActuator.setup(self, opts)
_Actuator.setup(self, opts)
class WebCtrlDriver(driver.SmapDriver):
def setup(self, opts):
        zoneSetpoint = [
            'unoccupied_heating',
            'unoccupied_cooling',
            'occupied_heating',
            'occupied_cooling',
        ]
        # List of WebCtrl points/types [Facilities/Bldg1/VAV1_1/] [Damper Position] [double]
        klass = WebCTRL_Actuator
        with open(opts['setpointFile'], 'r') as setpointFile:
            jsonStr = setpointFile.read()
        setpoints = json.loads(jsonStr)
        for entry in setpoints:
            data_type = 'double'
            for point in zoneSetpoint:
                devicePath = '{0}/{1}/setpoints/{2}.value'.format(entry['refName'], entry['block'], point)
                setup = opts
                setup['devicePath'] = devicePath
                actuatorPath = '{0}/{1}'.format(entry['path'], point)
                print actuatorPath
                self.add_actuator(actuatorPath, 'Value', klass, setup=setup, data_type=data_type)
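# Illustrative sMAP configuration for this driver (not part of the original
# module; hostnames and paths are placeholders). 'scriptPath' is optional;
# when it is omitted the SOAP client above is used instead.
#
#   [/webctrl]
#   type = webctrl.WebCtrlDriver
#   webctrlServerAddr = http://webctrl.example.edu
#   setpointFile = ~/setpoints.json
#   scriptPath = ~/webctrlRequest.py
#
# Each entry in setpointFile is expected to provide the keys read in setup():
#   [{"refName": "#vav_1", "block": "zone1", "path": "/Building1/VAV1"}]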
| mit | -4,791,517,070,495,689,000 | 34.833333 | 103 | 0.644961 | false |
smly/nips17_adversarial_attack | defense/import_from_tf_baseline/dump_ens_adv_inception_v3.py | 1 | 27088 | # -*- coding: utf-8 -*-
import os
import sys
import math
from pathlib import Path
import scipy.misc
import h5py
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import click
from models.slim.nets.inception_v3 import (
inception_v3,
inception_v3_arg_scope)
import inception_v3_fullconv
slim = tf.contrib.slim
FMT_CONV = 'InceptionV3/InceptionV3/{}/convolution'
FMT_RELU = 'InceptionV3/InceptionV3/{}/Relu'
FMT_OTHER = 'InceptionV3/{}/{}'
TEST_THRESHOLD = 1e-2
def _make_padding(padding_name, conv_shape):
padding_name = padding_name.decode("utf-8")
if padding_name == "VALID":
return [0, 0]
elif padding_name == "SAME":
return [
math.floor(int(conv_shape[0])/2),
math.floor(int(conv_shape[1])/2)
]
else:
raise RuntimeError(f"Invalid padding name: {padding_name}")
def get_store_path(outdir, name):
return (
Path(outdir) /
Path('dump') /
Path(f'{name}.h5'))
def dump_conv2d(sess, name='Conv2d_1a_3x3', outdir='./dump'):
conv_operation = sess.graph.get_operation_by_name(FMT_CONV.format(name))
weights_tensor = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'weights:0'))
weights = weights_tensor.eval()
padding = _make_padding(
conv_operation.get_attr('padding'),
weights_tensor.get_shape())
strides = conv_operation.get_attr('strides')
conv_out = sess.graph.get_operation_by_name(
FMT_CONV.format(name)).outputs[0].eval()
bn_beta = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'BatchNorm/beta:0')).eval()
bn_mean = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'BatchNorm/moving_mean:0')).eval()
bn_var = sess.graph.get_tensor_by_name(
FMT_OTHER.format(name, 'BatchNorm/moving_variance:0')).eval()
relu_out = sess.graph.get_operation_by_name(
FMT_RELU.format(name)).outputs[0].eval()
store_path = get_store_path(outdir, name)
if not store_path.parent.exists():
store_path.parent.mkdir(parents=True)
with h5py.File(str(store_path), 'w') as h5f:
# conv
h5f.create_dataset("weights", data=weights)
h5f.create_dataset("strides", data=strides)
h5f.create_dataset("padding", data=padding)
h5f.create_dataset("conv_out", data=conv_out)
# batch norm
h5f.create_dataset("beta", data=bn_beta)
h5f.create_dataset("mean", data=bn_mean)
h5f.create_dataset("var", data=bn_var)
h5f.create_dataset("relu_out", data=relu_out)
def dump_logits(sess, name='Logits/Conv2d_1c_1x1', outdir='./dump'):
conv_operation = sess.graph.get_operation_by_name(
f'InceptionV3/{name}/convolution')
weights_tensor = sess.graph.get_tensor_by_name(
f'InceptionV3/{name}/weights:0')
weights = weights_tensor.eval()
biases_tensor = sess.graph.get_tensor_by_name(
f'InceptionV3/{name}/biases:0')
biases = biases_tensor.eval()
padding = _make_padding(
conv_operation.get_attr('padding'),
weights_tensor.get_shape())
strides = conv_operation.get_attr('strides')
conv_out = sess.graph.get_operation_by_name(
f'InceptionV3/{name}/BiasAdd').outputs[0].eval()
store_path = get_store_path(outdir, name)
if not store_path.parent.exists():
store_path.parent.mkdir(parents=True)
with h5py.File(str(store_path), 'w') as h5f:
h5f.create_dataset("weights", data=weights)
h5f.create_dataset("biases", data=biases)
h5f.create_dataset("strides", data=strides)
h5f.create_dataset("padding", data=padding)
h5f.create_dataset("conv_out", data=conv_out)
def dump_conv2d_logit(sess, name='Conv2d_1a_3x3', outdir='./dump'):
conv_operation = sess.graph.get_operation_by_name(
'InceptionV3/{}/convolution'.format(name))
weights_tensor = sess.graph.get_tensor_by_name(
'InceptionV3/{}/weights:0'.format(name))
weights = weights_tensor.eval()
padding = _make_padding(
conv_operation.get_attr('padding'),
weights.shape)
strides = conv_operation.get_attr('strides')
conv_out = sess.graph.get_operation_by_name(
'InceptionV3/{}/convolution'.format(name)
).outputs[0].eval()
beta = sess.graph.get_tensor_by_name(
'InceptionV3/{}/BatchNorm/beta:0'.format(name)).eval()
mean = sess.graph.get_tensor_by_name(
'InceptionV3/{}/BatchNorm/moving_mean:0'.format(name)).eval()
var = sess.graph.get_tensor_by_name(
'InceptionV3/{}/BatchNorm/moving_variance:0'.format(name)).eval()
relu_out = sess.graph.get_operation_by_name(
'InceptionV3/{}/Relu'.format(name)).outputs[0].eval()
store_path = get_store_path(outdir, name)
if not store_path.parent.exists():
store_path.parent.mkdir(parents=True)
with h5py.File(str(store_path), 'w') as h5f:
h5f.create_dataset("weights", data=weights)
h5f.create_dataset("strides", data=strides)
h5f.create_dataset("padding", data=padding)
h5f.create_dataset("conv_out", data=conv_out)
# batch norm
h5f.create_dataset("beta", data=beta)
h5f.create_dataset("mean", data=mean)
h5f.create_dataset("var", data=var)
h5f.create_dataset("relu_out", data=relu_out)
def dump_mixed_5b(sess, name='Mixed_5b', outdir='./dump'):
dump_conv2d(sess, name=f'{name}/Branch_0/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_1/Conv2d_0b_5x5', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0b_3x3', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0c_3x3', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_3/Conv2d_0b_1x1', outdir=outdir)
def dump_mixed_5c(sess, name='Mixed_5c', outdir='./dump'):
dump_conv2d(sess, name=f'{name}/Branch_0/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_1/Conv2d_0b_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_1/Conv_1_0c_5x5', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0b_3x3', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0c_3x3', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_3/Conv2d_0b_1x1', outdir=outdir)
def dump_mixed_5d(sess, name='Mixed_5d', outdir='./dump'):
dump_conv2d(sess, name=f'{name}/Branch_0/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_1/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_1/Conv2d_0b_5x5', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0a_1x1', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0b_3x3', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_2/Conv2d_0c_3x3', outdir=outdir)
dump_conv2d(sess, name=f'{name}/Branch_3/Conv2d_0b_1x1', outdir=outdir)
def dump_mixed_6a(sess, name='Mixed_6a', outdir='./dump'):
dump_conv2d(sess, f"{name}/Branch_0/Conv2d_1a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0b_3x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_1a_1x1", outdir=outdir)
def dump_mixed_6b(sess, name='Mixed_6b', outdir='./dump'):
dump_conv2d(sess, f"{name}/Branch_0/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0b_1x7", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0c_7x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0b_7x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0c_1x7", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0d_7x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0e_1x7", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_3/Conv2d_0b_1x1", outdir=outdir)
def dump_mixed_6c(sess, name='Mixed_6c', outdir='./dump'):
dump_mixed_6b(sess, name=name, outdir=outdir)
def dump_mixed_6d(sess, name='Mixed_6d', outdir='./dump'):
dump_mixed_6b(sess, name=name, outdir=outdir)
def dump_mixed_6e(sess, name='Mixed_6e', outdir='./dump'):
dump_mixed_6b(sess, name=name, outdir=outdir)
def dump_mixed_7a(sess, name='Mixed_7a', outdir='./dump'):
dump_conv2d(sess, f"{name}/Branch_0/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_0/Conv2d_1a_3x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0b_1x7", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0c_7x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_1a_3x3", outdir=outdir)
def dump_mixed_7b(sess, name='Mixed_7b', outdir='./dump'):
dump_conv2d(sess, f"{name}/Branch_0/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0b_1x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0b_3x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0b_3x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0c_1x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0d_3x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_3/Conv2d_0b_1x1", outdir=outdir)
def dump_mixed_7c(sess, name='Mixed_7c', outdir='./dump'):
dump_conv2d(sess, f"{name}/Branch_0/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0b_1x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_1/Conv2d_0c_3x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0a_1x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0b_3x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0c_1x3", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_2/Conv2d_0d_3x1", outdir=outdir)
dump_conv2d(sess, f"{name}/Branch_3/Conv2d_0b_1x1", outdir=outdir)
def dump_aux_logits(sess, outdir='./dump'):
dump_conv2d_logit(sess, "AuxLogits/Conv2d_1b_1x1", outdir=outdir)
dump_conv2d_logit(sess, "AuxLogits/Conv2d_2a_5x5", outdir=outdir)
weights_tensor = sess.graph.get_tensor_by_name(
'InceptionV3/AuxLogits/Conv2d_2b_1x1/weights:0')
weights = weights_tensor.eval()
biases_tensor = sess.graph.get_tensor_by_name(
'InceptionV3/AuxLogits/Conv2d_2b_1x1/biases:0')
biases = biases_tensor.eval()
store_path = get_store_path(outdir, 'AuxLogits')
if not store_path.parent.exists():
store_path.parent.mkdir(parents=True)
with h5py.File(str(store_path), 'w') as h5f:
h5f.create_dataset("weights", data=weights)
h5f.create_dataset("biases", data=biases)
def _assign_from_checkpoint(sess, checkpoint):
init_fn = slim.assign_from_checkpoint_fn(
checkpoint,
slim.get_model_variables('InceptionV3'))
init_fn(sess)
def show_all_variables():
for v in slim.get_model_variables():
print(v.name, v.get_shape())
def dump_all(sess, logits, outdir):
tf.summary.scalar('logs', logits[0][0])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter("logs", sess.graph)
# run for comparing output values later
out = sess.run(summary_op)
summary_writer.add_summary(out, 0)
dump_logits(sess, outdir=outdir)
dump_conv2d(sess, name='Conv2d_1a_3x3', outdir=outdir)
dump_conv2d(sess, name='Conv2d_2a_3x3', outdir=outdir)
dump_conv2d(sess, name='Conv2d_2b_3x3', outdir=outdir)
dump_conv2d(sess, name='Conv2d_3b_1x1', outdir=outdir)
dump_conv2d(sess, name='Conv2d_4a_3x3', outdir=outdir)
dump_mixed_5b(sess, outdir=outdir)
dump_mixed_5c(sess, outdir=outdir)
dump_mixed_5d(sess, outdir=outdir)
dump_mixed_6a(sess, outdir=outdir)
dump_mixed_6b(sess, outdir=outdir)
dump_mixed_6c(sess, outdir=outdir)
dump_mixed_6d(sess, outdir=outdir)
dump_mixed_6e(sess, outdir=outdir)
dump_mixed_7a(sess, outdir=outdir)
dump_mixed_7b(sess, outdir=outdir)
dump_mixed_7c(sess, outdir=outdir)
dump_aux_logits(sess, outdir=outdir)
def dump_proc(checkpoint, outdir, verbose):
with tf.Graph().as_default(),\
slim.arg_scope(inception_v3_arg_scope()):
img = scipy.misc.imread('./000b7d55b6184b08.png')
inputs = np.ones((1, 299, 299, 3), dtype=np.float32)
inputs[0] = (img / 255.0) * 2 - 1.0
inputs = tf.stack(inputs)
logits, _ = inception_v3(inputs,
num_classes=1001,
is_training=False)
with tf.Session() as sess:
_assign_from_checkpoint(sess, checkpoint)
if verbose > 0:
show_all_variables()
dump_all(sess, logits, outdir)
def load_conv2d(state, outdir, name, path):
store_path = str(
Path(outdir) /
Path("dump") /
Path("{}.h5".format(path)))
with h5py.File(store_path, 'r') as h5f:
state[f'{name}.conv.weight'] = torch.from_numpy(
h5f['weights'][()]).permute(3, 2, 0, 1)
out_planes = state['{}.conv.weight'.format(name)].size(0)
state[f'{name}.bn.weight'] = torch.ones(out_planes)
state[f'{name}.bn.bias'] = torch.from_numpy(h5f['beta'][()])
state[f'{name}.bn.running_mean'] = torch.from_numpy(h5f['mean'][()])
state[f'{name}.bn.running_var'] = torch.from_numpy(h5f['var'][()])
def load_aux_logits(state, outdir):
load_conv2d(state, outdir, 'AuxLogits.conv0', 'AuxLogits/Conv2d_1b_1x1')
load_conv2d(state, outdir, 'AuxLogits.conv1', 'AuxLogits/Conv2d_2a_5x5')
store_path = str(
Path(outdir) /
Path("dump") /
Path("AuxLogits.h5"))
with h5py.File(store_path, 'r') as h5f:
state['AuxLogits.fc.bias'] = torch.from_numpy(h5f['biases'][()])
state['AuxLogits.fc.weight'] = torch.from_numpy(h5f['weights'][()])
store_path = str(
Path(outdir) /
Path("dump") /
Path("Logits/Conv2d_1c_1x1.h5"))
with h5py.File(store_path, 'r') as h5f:
state["fc.weight"] = torch.from_numpy(
h5f['weights'][()]).permute(3, 2, 0, 1)
state["fc.bias"] = torch.from_numpy(h5f['biases'][()])
def load_mixed_5b(state, outdir, name):
name_path_pairs = [
# Branch 0
(f'{name}.branch1x1', f'{name}/Branch_0/Conv2d_0a_1x1'),
# Branch 1
(f'{name}.branch5x5_1', f'{name}/Branch_1/Conv2d_0a_1x1'),
(f'{name}.branch5x5_2', f'{name}/Branch_1/Conv2d_0b_5x5'),
# Branch 2
(f'{name}.branch3x3dbl_1', f'{name}/Branch_2/Conv2d_0a_1x1'),
(f'{name}.branch3x3dbl_2', f'{name}/Branch_2/Conv2d_0b_3x3'),
(f'{name}.branch3x3dbl_3', f'{name}/Branch_2/Conv2d_0c_3x3'),
# Branch 3
(f'{name}.branch_pool', f'{name}/Branch_3/Conv2d_0b_1x1'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_mixed_5c(state, outdir, name):
name_path_pairs = [
(f'{name}.branch1x1', f'{name}/Branch_0/Conv2d_0a_1x1'),
(f'{name}.branch5x5_1', f'{name}/Branch_1/Conv2d_0b_1x1'),
(f'{name}.branch5x5_2', f'{name}/Branch_1/Conv_1_0c_5x5'),
(f'{name}.branch3x3dbl_1', f'{name}/Branch_2/Conv2d_0a_1x1'),
(f'{name}.branch3x3dbl_2', f'{name}/Branch_2/Conv2d_0b_3x3'),
(f'{name}.branch3x3dbl_3', f'{name}/Branch_2/Conv2d_0c_3x3'),
(f'{name}.branch_pool', f'{name}/Branch_3/Conv2d_0b_1x1'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_mixed_5d(state, outdir, name):
name_path_pairs = [
(f'{name}.branch1x1', f'{name}/Branch_0/Conv2d_0a_1x1'),
(f'{name}.branch5x5_1', f'{name}/Branch_1/Conv2d_0a_1x1'),
(f'{name}.branch5x5_2', f'{name}/Branch_1/Conv2d_0b_5x5'),
(f'{name}.branch3x3dbl_1', f'{name}/Branch_2/Conv2d_0a_1x1'),
(f'{name}.branch3x3dbl_2', f'{name}/Branch_2/Conv2d_0b_3x3'),
(f'{name}.branch3x3dbl_3', f'{name}/Branch_2/Conv2d_0c_3x3'),
(f'{name}.branch_pool', f'{name}/Branch_3/Conv2d_0b_1x1'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_mixed_6a(state, outdir, name):
name_path_pairs = [
(f'{name}.branch3x3', f'{name}/Branch_0/Conv2d_1a_1x1'),
(f'{name}.branch3x3dbl_1', f'{name}/Branch_1/Conv2d_0a_1x1'),
(f'{name}.branch3x3dbl_2', f'{name}/Branch_1/Conv2d_0b_3x3'),
(f'{name}.branch3x3dbl_3', f'{name}/Branch_1/Conv2d_1a_1x1'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_mixed_6b(state, outdir, name):
name_path_pairs = [
(f'{name}.branch1x1', f'{name}/Branch_0/Conv2d_0a_1x1'),
(f'{name}.branch7x7_1', f'{name}/Branch_1/Conv2d_0a_1x1'),
(f'{name}.branch7x7_2', f'{name}/Branch_1/Conv2d_0b_1x7'),
(f'{name}.branch7x7_3', f'{name}/Branch_1/Conv2d_0c_7x1'),
(f'{name}.branch7x7dbl_1', f'{name}/Branch_2/Conv2d_0a_1x1'),
(f'{name}.branch7x7dbl_2', f'{name}/Branch_2/Conv2d_0b_7x1'),
(f'{name}.branch7x7dbl_3', f'{name}/Branch_2/Conv2d_0c_1x7'),
(f'{name}.branch7x7dbl_4', f'{name}/Branch_2/Conv2d_0d_7x1'),
(f'{name}.branch7x7dbl_5', f'{name}/Branch_2/Conv2d_0e_1x7'),
(f'{name}.branch_pool', f'{name}/Branch_3/Conv2d_0b_1x1'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_mixed_6c(state, outdir, name):
load_mixed_6b(state, outdir, name)
def load_mixed_6d(state, outdir, name):
load_mixed_6b(state, outdir, name)
def load_mixed_6e(state, outdir, name):
load_mixed_6b(state, outdir, name)
def load_mixed_7a(state, outdir, name):
name_path_pairs = [
(f'{name}.branch3x3_1', f'{name}/Branch_0/Conv2d_0a_1x1'),
(f'{name}.branch3x3_2', f'{name}/Branch_0/Conv2d_1a_3x3'),
(f'{name}.branch7x7x3_1', f'{name}/Branch_1/Conv2d_0a_1x1'),
(f'{name}.branch7x7x3_2', f'{name}/Branch_1/Conv2d_0b_1x7'),
(f'{name}.branch7x7x3_3', f'{name}/Branch_1/Conv2d_0c_7x1'),
(f'{name}.branch7x7x3_4', f'{name}/Branch_1/Conv2d_1a_3x3'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_mixed_7b(state, outdir, name):
name_path_pairs = [
(f'{name}.branch1x1', f'{name}/Branch_0/Conv2d_0a_1x1'),
(f'{name}.branch3x3_1', f'{name}/Branch_1/Conv2d_0a_1x1'),
(f'{name}.branch3x3_2a', f'{name}/Branch_1/Conv2d_0b_1x3'),
(f'{name}.branch3x3_2b', f'{name}/Branch_1/Conv2d_0b_3x1'),
(f'{name}.branch3x3dbl_1', f'{name}/Branch_2/Conv2d_0a_1x1'),
(f'{name}.branch3x3dbl_2', f'{name}/Branch_2/Conv2d_0b_3x3'),
(f'{name}.branch3x3dbl_3a', f'{name}/Branch_2/Conv2d_0c_1x3'),
(f'{name}.branch3x3dbl_3b', f'{name}/Branch_2/Conv2d_0d_3x1'),
(f'{name}.branch_pool', f'{name}/Branch_3/Conv2d_0b_1x1'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_mixed_7c(state, outdir, name):
name_path_pairs = [
(f'{name}.branch1x1', f'{name}/Branch_0/Conv2d_0a_1x1'),
(f'{name}.branch3x3_1', f'{name}/Branch_1/Conv2d_0a_1x1'),
(f'{name}.branch3x3_2a', f'{name}/Branch_1/Conv2d_0b_1x3'),
(f'{name}.branch3x3_2b', f'{name}/Branch_1/Conv2d_0c_3x1'),
(f'{name}.branch3x3dbl_1', f'{name}/Branch_2/Conv2d_0a_1x1'),
(f'{name}.branch3x3dbl_2', f'{name}/Branch_2/Conv2d_0b_3x3'),
(f'{name}.branch3x3dbl_3a', f'{name}/Branch_2/Conv2d_0c_1x3'),
(f'{name}.branch3x3dbl_3b', f'{name}/Branch_2/Conv2d_0d_3x1'),
(f'{name}.branch_pool', f'{name}/Branch_3/Conv2d_0b_1x1'),
]
for name_, path_ in name_path_pairs:
load_conv2d(state, outdir, name_, path_)
def load_state_dict_from_h5py(outdir):
state = {}
load_conv2d(state, outdir, 'Conv2d_1a_3x3', 'Conv2d_1a_3x3')
load_conv2d(state, outdir, 'Conv2d_2a_3x3', 'Conv2d_2a_3x3')
load_conv2d(state, outdir, 'Conv2d_2b_3x3', 'Conv2d_2b_3x3')
load_conv2d(state, outdir, 'Conv2d_3b_1x1', 'Conv2d_3b_1x1')
load_conv2d(state, outdir, 'Conv2d_4a_3x3', 'Conv2d_4a_3x3')
load_mixed_5b(state, outdir, 'Mixed_5b')
load_mixed_5c(state, outdir, 'Mixed_5c')
load_mixed_5d(state, outdir, 'Mixed_5d')
load_mixed_6a(state, outdir, 'Mixed_6a')
load_mixed_6b(state, outdir, 'Mixed_6b')
load_mixed_6c(state, outdir, 'Mixed_6c')
load_mixed_6d(state, outdir, 'Mixed_6d')
load_mixed_6e(state, outdir, 'Mixed_6e')
load_mixed_7a(state, outdir, 'Mixed_7a')
load_mixed_7b(state, outdir, 'Mixed_7b')
load_mixed_7c(state, outdir, 'Mixed_7c')
load_aux_logits(state, outdir)
return state
def load_proc(outdir, export_path, verbose):
model = inception_v3_fullconv.inception_v3(
num_classes=1001,
pretrained=False)
state_dict = load_state_dict_from_h5py(outdir)
model.load_state_dict(state_dict)
model.eval()
torch.save(state_dict, export_path)
return model
def test_conv2d(outdir, module, path):
store_path = str(
Path(outdir) /
Path("dump") /
Path(f"{path}.h5"))
with h5py.File(store_path, 'r') as h5f:
output_tf_conv = torch.from_numpy(h5f['conv_out'][()])
output_tf_conv.transpose_(1, 3)
output_tf_conv.transpose_(2, 3)
output_tf_relu = torch.from_numpy(h5f['relu_out'][()])
output_tf_relu.transpose_(1, 3)
output_tf_relu.transpose_(2, 3)
def test_dist_conv(self, input, output):
dist = torch.dist(output.data, output_tf_conv)
assert dist < TEST_THRESHOLD
def test_dist_relu(self, input, output):
dist = torch.dist(output.data, output_tf_relu)
assert dist < TEST_THRESHOLD
module.conv.register_forward_hook(test_dist_conv)
module.relu.register_forward_hook(test_dist_relu)
def test_conv2d_nobn(outdir, module, path):
store_path = str(
Path(outdir) /
Path("dump") /
Path(f"{path}.h5"))
with h5py.File(store_path, 'r') as h5f:
output_tf = torch.from_numpy(h5f['conv_out'][()])
output_tf.transpose_(1, 3)
output_tf.transpose_(2, 3)
def test_dist(self, input, output):
dist = torch.dist(output.data, output_tf)
assert dist < TEST_THRESHOLD
module.register_forward_hook(test_dist)
def _register_forward_hook(outdir, model):
test_conv2d(outdir, model.Conv2d_1a_3x3, 'Conv2d_1a_3x3')
test_conv2d(outdir, model.Conv2d_2a_3x3, 'Conv2d_2a_3x3')
test_conv2d(outdir, model.Conv2d_2b_3x3, 'Conv2d_2b_3x3')
test_conv2d(outdir, model.Conv2d_3b_1x1, 'Conv2d_3b_1x1')
test_conv2d(outdir, model.Conv2d_4a_3x3, 'Conv2d_4a_3x3')
test_conv2d(outdir, model.Mixed_5b.branch1x1,
'Mixed_5b/Branch_0/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_5b.branch5x5_1,
'Mixed_5b/Branch_1/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_5b.branch5x5_2,
'Mixed_5b/Branch_1/Conv2d_0b_5x5')
test_conv2d(outdir, model.Mixed_5b.branch3x3dbl_1,
'Mixed_5b/Branch_2/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_5b.branch3x3dbl_2,
'Mixed_5b/Branch_2/Conv2d_0b_3x3')
test_conv2d(outdir, model.Mixed_5b.branch3x3dbl_3,
'Mixed_5b/Branch_2/Conv2d_0c_3x3')
test_conv2d(outdir, model.Mixed_5b.branch_pool,
'Mixed_5b/Branch_3/Conv2d_0b_1x1')
test_conv2d(outdir, model.Mixed_5c.branch1x1,
'Mixed_5c/Branch_0/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_5c.branch5x5_1,
'Mixed_5c/Branch_1/Conv2d_0b_1x1')
test_conv2d(outdir, model.Mixed_5c.branch5x5_2,
'Mixed_5c/Branch_1/Conv_1_0c_5x5')
test_conv2d(outdir, model.Mixed_5c.branch3x3dbl_1,
'Mixed_5c/Branch_2/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_5c.branch3x3dbl_2,
'Mixed_5c/Branch_2/Conv2d_0b_3x3')
test_conv2d(outdir, model.Mixed_5c.branch3x3dbl_3,
'Mixed_5c/Branch_2/Conv2d_0c_3x3')
test_conv2d(outdir, model.Mixed_5c.branch_pool,
'Mixed_5c/Branch_3/Conv2d_0b_1x1')
test_conv2d(outdir, model.Mixed_6b.branch7x7_1,
'Mixed_6b/Branch_1/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_6b.branch7x7_2,
'Mixed_6b/Branch_1/Conv2d_0b_1x7')
test_conv2d(outdir, model.Mixed_6b.branch7x7_3,
'Mixed_6b/Branch_1/Conv2d_0c_7x1')
# 7a
test_conv2d(outdir, model.Mixed_7a.branch3x3_1,
'Mixed_7a/Branch_0/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_7a.branch3x3_2,
'Mixed_7a/Branch_0/Conv2d_1a_3x3')
# 7b
test_conv2d(outdir, model.Mixed_7b.branch3x3_1,
'Mixed_7b/Branch_1/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_7b.branch3x3_2a,
'Mixed_7b/Branch_1/Conv2d_0b_1x3')
test_conv2d(outdir, model.Mixed_7b.branch3x3_2b,
'Mixed_7b/Branch_1/Conv2d_0b_3x1')
# 7c
test_conv2d(outdir, model.Mixed_7c.branch3x3_1,
'Mixed_7c/Branch_1/Conv2d_0a_1x1')
test_conv2d(outdir, model.Mixed_7c.branch3x3_2a,
'Mixed_7c/Branch_1/Conv2d_0b_1x3')
test_conv2d(outdir, model.Mixed_7c.branch3x3_2b,
'Mixed_7c/Branch_1/Conv2d_0c_3x1')
test_conv2d_nobn(outdir, model.fc, 'Logits/Conv2d_1c_1x1')
def run_test(outdir, model):
_register_forward_hook(outdir, model)
img = scipy.misc.imread('./000b7d55b6184b08.png')
inputs = np.ones((1, 299, 299, 3), dtype=np.float32)
inputs[0] = img.astype(np.float32) / 255.0
inputs = torch.from_numpy(inputs)
inputs = inputs * 2 - 1.0
inputs = inputs.permute(0, 3, 1, 2)
outputs = model.forward(torch.autograd.Variable(inputs))
print("OK")
@click.command()
@click.option('--checkpoint',
help='checkpoint file',
default='./ens3_adv_inception_v3.ckpt')
@click.option('--outdir',
help='output directory',
default='./dump')
@click.option('--export-path',
default='../working_files/ens3incepv3_fullconv_state.pth')
@click.option('-v', '--verbose', count=True)
def main(checkpoint, outdir, export_path, verbose):
dump_proc(checkpoint, outdir, verbose)
model = load_proc(outdir, export_path, verbose)
if verbose > 0:
run_test(outdir, model)
if __name__ == '__main__':
main()
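# Example invocation (illustrative; the checkpoint file and the test image
# 000b7d55b6184b08.png must be present in the working directory):
#
#   python dump_ens_adv_inception_v3.py \
#       --checkpoint ./ens3_adv_inception_v3.ckpt \
#       --outdir ./dump \
#       --export-path ../working_files/ens3incepv3_fullconv_state.pth \
#       -v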
| apache-2.0 | 3,584,438,041,364,404,000 | 37.477273 | 76 | 0.628138 | false |
makielab/sea-cucumber | seacucumber/tasks.py | 1 | 3306 | """
Supporting celery tasks go in this module. The primarily interesting one is
SendEmailTask, which handles sending a single Django EmailMessage object.
"""
import logging
from django.conf import settings
from celery.task import Task
from boto.ses.exceptions import SESAddressBlacklistedError, SESDomainEndsWithDotError
from seacucumber.util import get_boto_ses_connection, dkim_sign
from seacucumber import FORCE_UNICODE
logger = logging.getLogger(__name__)
class SendEmailTask(Task):
"""
Sends an email through Boto's SES API module.
"""
def __init__(self):
self.max_retries = getattr(settings, 'CUCUMBER_MAX_RETRIES', 60)
self.default_retry_delay = getattr(settings, 'CUCUMBER_RETRY_DELAY', 60)
self.rate_limit = getattr(settings, 'CUCUMBER_RATE_LIMIT', 1)
# A boto.ses.SESConnection object, after running _open_ses_conn().
self.connection = None
def run(self, from_email, recipients, message):
"""
This does the dirty work. Connects to Amazon SES via boto and fires
off the message.
:param str from_email: The email address the message will show as
originating from.
:param list recipients: A list of email addresses to send the
message to.
:param str message: The body of the message.
"""
self._open_ses_conn()
try:
# We use the send_raw_email func here because the Django
# EmailMessage object we got these values from constructs all of
# the headers and such.
signed_msg = dkim_sign(message)
if FORCE_UNICODE:
signed_msg = unicode(signed_msg, 'utf-8')
self.connection.send_raw_email(
source=from_email,
destinations=recipients,
raw_message=signed_msg,
)
except SESAddressBlacklistedError, exc:
            # Blacklisted users are those for which delivery failed in the
            # last 24 hours. They'll eventually be automatically removed from
# the blacklist, but for now, this address is marked as
# undeliverable to.
logger.warning(
'Attempted to email a blacklisted user: %s' % recipients,
exc_info=exc,
extra={'trace': True}
)
return False
except SESDomainEndsWithDotError, exc:
# Domains ending in a dot are simply invalid.
logger.warning(
'Invalid recipient, ending in dot: %s' % recipients,
exc_info=exc,
extra={'trace': True}
)
return False
except Exception, exc:
# Something else happened that we haven't explicitly forbade
# retry attempts for.
#noinspection PyUnresolvedReferences
self.retry(exc=exc)
# We shouldn't ever block long enough to see this, but here it is
# just in case (for debugging?).
return True
def _open_ses_conn(self):
"""
Create a connection to the AWS API server. This can be reused for
sending multiple emails.
"""
if self.connection:
return
self.connection = get_boto_ses_connection()
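# Illustrative dispatch (not part of this module; addresses are placeholders).
# The package's e-mail backend queues one task per Django EmailMessage,
# passing the fully rendered message string, roughly like:
#
#   SendEmailTask.delay(
#       '[email protected]',
#       ['[email protected]'],
#       email_message.message().as_string(),
#   )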
| mit | 261,616,636,548,345,380 | 37.44186 | 85 | 0.609498 | false |
cpcloud/numpy | numpy/lib/function_base.py | 2 | 123894 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
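    # Accumulate in fixed-size blocks: each block is sorted and searchsorted
    # against the bin edges, which keeps the temporary sort copies
    # memory-bounded for large inputs.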
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
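        # Weighted case: sort each block together with its weights, build a
        # cumulative weight sum, and sample it at the bin edges so that the
        # final np.diff yields the total weight falling into each bin.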
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes,
    # for example, when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
    in the interior and second order accurate one-sided (forward or backwards)
differences at the boundaries. The returned gradient hence has the same
shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0,1,2,3,4])
>>> dx = gradient(x)
>>> y = x**2
>>> gradient(y,dx)
array([0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2:
            # Use first order differences when only two points are available
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
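    # Map each jump into (-pi, pi] (keeping +pi for positive jumps), form the
    # 2*pi-multiple correction, and zero it where the jump is below `discont`.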
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
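    # Promote real input to a complex dtype (preserving precision where
    # possible) so that, as documented, the result is always complex.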
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
    further degrade performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
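    # Subtract each variable's mean; the normalized matrix product below is
    # then the covariance estimate (transposed as needed for `rowvar`).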
X -= X.mean(axis=1-axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
        shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
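    Examples
    --------
    An illustrative example, reusing the perfectly anti-correlated variables
    from the `cov` documentation above; the off-diagonal coefficients are -1:
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.corrcoef(x)
    array([[ 1., -1.],
           [-1.,  1.]])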
"""
c = cov(x, y, rowvar, bias, ddof)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
    as the Kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning window was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
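# A small sanity sketch (illustrative only, not part of the original
# module): the implementation switches from _i0_1 to _i0_2 at x = 8, and
# i0(1.0) should land close to the reference value I_0(1) ~ 1.2660658778.
def _example_i0_sanity():
    return abs(i0(1.0) - 1.2660658778) < 1e-8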
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
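# A minimal check (illustrative only) of the beta table in the docstring:
# beta = 0 reduces the Kaiser window to the rectangular window of ones.
def _example_kaiser_beta_zero(M=8):
    return np.allclose(kaiser(M, 0.0), ones(M))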
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
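# A minimal sketch (illustrative only; the helper is an assumption for
# exposition) of the bandlimited interpolation mentioned above: rebuild
# values at arbitrary times t from samples taken at integer positions.
def _example_sinc_interpolate(samples, t):
    samples = np.asarray(samples, dtype=float)
    t = np.asarray(t, dtype=float)
    n = arange(len(samples))
    # Each output point is a sinc-weighted combination of every sample.
    return np.dot(sinc(t[:, None] - n[None, :]), samples)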
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
    Call `func` with `a` as first argument, swapping the axes so that an
    extended (multi-)axis argument can be used with functions that don't
    support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
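# Illustrative note: for an input of shape (3, 4) reduced over axis=1 the
# helper above returns keepdim == [3, 1], which is exactly the shape that
# median/percentile reshape to when keepdims=True is requested.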
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
if axis is not None and axis >= a.ndim:
raise IndexError(
"axis %d out of bounds (%d)" % (axis, a.ndim))
if overwrite_input:
if axis is None:
part = a.ravel()
sz = part.size
if sz % 2 == 0:
szh = sz // 2
part.partition((szh - 1, szh))
else:
part.partition((sz - 1) // 2)
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
a.partition((szh - 1, szh), axis=axis)
else:
a.partition((sz - 1) // 2, axis=axis)
part = a
else:
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
else:
part = partition(a, (sz - 1) // 2, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
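# Worked note (illustrative): for a 1-D input of size 4 the partition
# indices above are (1, 2) and the median is the mean of those two sorted
# entries; for size 5 only the single index (5 - 1) // 2 == 2 is needed.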
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
    as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
array([ 3.5])
"""
q = asarray(q, dtype=np.float64)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
if indices.dtype == intp: # take the points along axis
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
return r
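# Worked example (illustrative): for a = [1, 2, 3, 4] and q = 25 with the
# default 'linear' rule, the fractional index is 0.25 * (4 - 1) = 0.75,
# so the result is 1 + 0.75 * (2 - 1) = 1.75.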
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
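# Worked note (illustrative): with the default dx = 1, trapz([1, 2, 3])
# evaluates to (1 + 2)/2 + (2 + 3)/2 = 4.0, matching the first docstring
# example above.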
# always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
    This routine cannot modify read-only docstrings, such as those that
    appear in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays together.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
values = array(values, copy=False, ndmin=arr.ndim)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
            # very differently from a[:,[0],:] = ...! This changes values so
            # that it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| bsd-3-clause | -4,577,193,708,041,557,500 | 31.013953 | 88 | 0.568833 | false |
minzhang28/docker-py | docker/client.py | 1 | 14161 | # Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import struct
from functools import partial
import requests
import requests.exceptions
import six
import websocket
from . import api
from . import constants
from . import errors
from .auth import auth
from .ssladapter import ssladapter
from .tls import TLSConfig
from .transport import UnixAdapter
from .utils import utils, check_resource, update_headers, kwargs_from_env
from .utils.socket import frames_iter
try:
from .transport import NpipeAdapter
except ImportError:
pass
def from_env(**kwargs):
return Client.from_env(**kwargs)
class Client(
requests.Session,
api.BuildApiMixin,
api.ContainerApiMixin,
api.DaemonApiMixin,
api.ExecApiMixin,
api.ImageApiMixin,
api.VolumeApiMixin,
api.NetworkApiMixin,
api.SwarmApiMixin):
def __init__(self, base_url=None, version=None,
timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=constants.DEFAULT_USER_AGENT):
super(Client, self).__init__()
if tls and not base_url:
raise errors.TLSParameterError(
'If using TLS, the base_url argument must be provided.'
)
self.base_url = base_url
self.timeout = timeout
self.headers['User-Agent'] = user_agent
self._auth_configs = auth.load_config()
base_url = utils.parse_host(
base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
)
if base_url.startswith('http+unix://'):
self._custom_adapter = UnixAdapter(base_url, timeout)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localunixsocket'
elif base_url.startswith('npipe://'):
if not constants.IS_WINDOWS_PLATFORM:
raise errors.DockerException(
'The npipe:// protocol is only supported on Windows'
)
try:
self._custom_adapter = NpipeAdapter(base_url, timeout)
except NameError:
raise errors.DockerException(
'Install pypiwin32 package to enable npipe:// support'
)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
self._custom_adapter = ssladapter.SSLAdapter()
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
if version is None:
self._version = constants.DEFAULT_DOCKER_API_VERSION
elif isinstance(version, six.string_types):
if version.lower() == 'auto':
self._version = self._retrieve_server_version()
else:
self._version = version
else:
raise errors.DockerException(
'Version parameter must be a string or None. Found {0}'.format(
type(version).__name__
)
)
@classmethod
def from_env(cls, **kwargs):
return cls(**kwargs_from_env(**kwargs))
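    # Usage sketch (illustrative only): from_env() builds the client from
    # the DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH environment
    # variables, so a common entry point is simply:
    #   client = Client.from_env()
    #   client.version()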
def _retrieve_server_version(self):
try:
return self.version(api_version=False)["ApiVersion"]
except KeyError:
raise errors.DockerException(
'Invalid response from docker daemon: key "ApiVersion"'
' is missing.'
)
except Exception as e:
raise errors.DockerException(
'Error while fetching server API version: {0}'.format(e)
)
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present."""
kwargs.setdefault('timeout', self.timeout)
return kwargs
@update_headers
def _post(self, url, **kwargs):
return self.post(url, **self._set_request_timeout(kwargs))
@update_headers
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
@update_headers
def _put(self, url, **kwargs):
return self.put(url, **self._set_request_timeout(kwargs))
@update_headers
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
if not isinstance(arg, six.string_types):
raise ValueError(
'Expected a string but found {0} ({1}) '
'instead'.format(arg, type(arg))
)
quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
return '{0}/v{1}{2}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
def _raise_for_status(self, response, explanation=None):
"""Raises stored :class:`APIError`, if one occurred."""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
raise errors.NotFound(e, response, explanation=explanation)
raise errors.APIError(e, response, explanation=explanation)
def _result(self, response, json=False, binary=False):
assert not (json and binary)
self._raise_for_status(response)
if json:
return response.json()
if binary:
return response.content
return response.text
def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
if data is not None:
for k, v in six.iteritems(data):
if v is not None:
data2[k] = v
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return self._post(url, data=json.dumps(data2), **kwargs)
def _attach_params(self, override=None):
return override or {
'stdout': 1,
'stderr': 1,
'stream': 1
}
@check_resource
def _attach_websocket(self, container, params=None):
url = self._url("/containers/{0}/attach/ws", container)
req = requests.Request("POST", url, params=self._attach_params(params))
full_url = req.prepare().url
full_url = full_url.replace("http://", "ws://", 1)
full_url = full_url.replace("https://", "wss://", 1)
return self._create_websocket_connection(full_url)
def _create_websocket_connection(self, url):
return websocket.create_connection(url)
def _get_raw_response_socket(self, response):
self._raise_for_status(response)
if six.PY3:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
else:
sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
pass
return sock
def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
if response.raw._fp.chunked:
reader = response.raw
while not reader.closed:
# this read call will block until we get a chunk
data = reader.read(1)
if not data:
break
if reader._fp.chunk_left:
data += reader.read(reader._fp.chunk_left)
if decode:
if six.PY3:
data = data.decode('utf-8')
data = json.loads(data)
yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
yield self._result(response)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
walker = 0
while True:
if len(buf[walker:]) < 8:
break
_, length = struct.unpack_from('>BxxxL', buf[walker:])
start = walker + constants.STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end]
def _multiplexed_response_stream_helper(self, response):
"""A generator of multiplexed data blocks coming from a response
stream."""
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
while True:
header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
if not header:
break
_, length = struct.unpack('>BxxxL', header)
if not length:
continue
data = response.raw.read(length)
if not data:
break
yield data
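    # A small illustrative helper (an assumption for exposition, not part
    # of the upstream API): the multiplexed frame format parsed above is
    # an 8-byte header -- one byte identifying the stream, three padding
    # bytes, then a big-endian 32-bit payload length -- followed by the
    # payload bytes themselves.
    @staticmethod
    def _example_build_multiplexed_frame(stream_id, payload):
        header = struct.pack('>BxxxL', stream_id, len(payload))
        return header + payload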
def _stream_raw_result_old(self, response):
''' Stream raw output for API versions below 1.6 '''
self._raise_for_status(response)
for line in response.iter_lines(chunk_size=1,
decode_unicode=True):
# filter out keep-alive new lines
if line:
yield line
def _stream_raw_result(self, response):
''' Stream result for TTY-enabled container above API 1.6 '''
self._raise_for_status(response)
for out in response.iter_content(chunk_size=1, decode_unicode=True):
yield out
def _read_from_socket(self, response, stream):
socket = self._get_raw_response_socket(response)
if stream:
return frames_iter(socket)
else:
return six.binary_type().join(frames_iter(socket))
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
"""
sockets = [socket, getattr(socket, '_sock', None)]
for s in sockets:
if not hasattr(s, 'settimeout'):
continue
timeout = -1
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
s.settimeout(None)
def _get_result(self, container, stream, res):
cont = self.inspect_container(container)
return self._get_result_tty(stream, res, cont['Config']['Tty'])
def _get_result_tty(self, stream, res, is_tty):
# Stream multi-plexing was only introduced in API v1.6. Anything
# before that needs old-style streaming.
if utils.compare_version('1.6', self._version) < 0:
return self._stream_raw_result_old(res)
# We should also use raw streaming (without keep-alives)
# if we're dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
self._raise_for_status(res)
sep = six.binary_type()
if stream:
return self._multiplexed_response_stream_helper(res)
else:
return sep.join(
[x for x in self._multiplexed_buffer_helper(res)]
)
def get_adapter(self, url):
try:
return super(Client, self).get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
else:
raise e
@property
def api_version(self):
return self._version
class AutoVersionClient(Client):
def __init__(self, *args, **kwargs):
if 'version' in kwargs and kwargs['version']:
raise errors.DockerException(
'Can not specify version for AutoVersionClient'
)
kwargs['version'] = 'auto'
super(AutoVersionClient, self).__init__(*args, **kwargs)
| apache-2.0 | -6,174,621,705,723,003,000 | 34.760101 | 79 | 0.578985 | false |
Megalinuxcoin/megalinuxcoin | contrib/bitrpc/bitrpc.py | 1 | 7850 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:56883")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:56883")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Megalinuxcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Megalinuxcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit | 8,456,373,962,798,494,000 | 23.228395 | 79 | 0.662293 | false |
mekkablue/Glyphs-Scripts | Compare Frontmost Fonts/Compare Glyph Heights of Frontmost Fonts.py | 1 | 8413 | #MenuTitle: Compare Glyph Heights
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Lists all glyphs that differ from the second font in height beyond a given threshold.
"""
import vanilla
class CompareGlyphHeightsOfFrontmostFonts( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 320
windowHeight = 200
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Compare Glyph Heights of Frontmost Fonts", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.mainwindow" # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.descriptionText = vanilla.TextBox( (inset, linePos+2, -inset, 28), u"Lists all glyphs that differ in height more than the given threshold. Detailed report in Macro Window.", sizeStyle='small', selectable=True )
linePos += lineHeight*2
self.w.tolerateText = vanilla.TextBox( (inset, linePos+2, 140, 14), u"Tolerate difference up to:", sizeStyle='small', selectable=True )
self.w.tolerate = vanilla.EditText( (inset+140, linePos, -inset, 19), "5", callback=self.SavePreferences, sizeStyle='small' )
self.w.tolerate.getNSTextField().setToolTip_("How much of a difference is OK. Hint: overshoot size is a good idea for this one.")
linePos += lineHeight
self.w.heights = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Compare top bounds (‘heights’)", value=True, callback=self.SavePreferences, sizeStyle='small' )
self.w.heights.getNSButton().setToolTip_("Measures and compares the heights of the top edges (bbox maximum).")
linePos += lineHeight
self.w.depths = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Compare bottom bounds (‘depths’)", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.depths.getNSButton().setToolTip_("Measures and compares the heights of the bottom edges (bbox minimum).")
linePos += lineHeight
self.w.includeNonExporting = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"Include non-exporting glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
self.w.includeNonExporting.getNSButton().setToolTip_("If enabled, also measures glyphs that are set to not export.")
linePos += lineHeight
# Run Button:
self.w.runButton = vanilla.Button( (-120-inset, -20-inset, -inset, -inset), "Compare", sizeStyle='regular', callback=self.CompareGlyphHeightsOfFrontmostFontsMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Compare Glyph Heights of Frontmost Fonts' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def updateGUI(self):
if not self.w.heights.get() and not self.w.depths.get():
self.w.runButton.enable(False)
else:
self.w.runButton.enable(True)
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.heights"] = self.w.heights.get()
Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.depths"] = self.w.depths.get()
Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.tolerate"] = self.w.tolerate.get()
Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.includeNonExporting"] = self.w.includeNonExporting.get()
self.updateGUI()
except:
return False
return True
def LoadPreferences( self ):
try:
Glyphs.registerDefault("com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.heights", 0)
Glyphs.registerDefault("com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.depths", 0)
Glyphs.registerDefault("com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.tolerate", 0)
Glyphs.registerDefault("com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.includeNonExporting", 0)
self.w.heights.set( Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.heights"] )
self.w.depths.set( Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.depths"] )
self.w.tolerate.set( Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.tolerate"] )
self.w.includeNonExporting.set( Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.includeNonExporting"] )
self.updateGUI()
except:
return False
return True
def CompareGlyphHeightsOfFrontmostFontsMain( self, sender ):
try:
# update settings to the latest user input:
if not self.SavePreferences( self ):
print("Note: 'Compare Glyph Heights of Frontmost Fonts' could not write preferences.")
if len(Glyphs.fonts) < 2:
Message(title="Compare Error", message="You need to have at least two fonts open for comparing.", OKButton="Ooops")
else:
# brings macro window to front and clears its log:
Glyphs.clearLog()
# Glyphs.showMacroWindow()
thisFont = Glyphs.font # frontmost font
otherFont = Glyphs.fonts[1] # second font
thisFileName = thisFont.filepath.lastPathComponent()
otherFileName = otherFont.filepath.lastPathComponent()
print("Compare Glyph Heights of Frontmost Fonts Report for:\n (1) %s: %s\n %s\n (2) %s: %s\n %s" % (
thisFont.familyName, thisFileName, thisFont.filepath,
otherFont.familyName, otherFileName, otherFont.filepath,
))
print()
heights = Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.heights"]
depths = Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.depths"]
tolerate = float(Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.tolerate"])
includeNonExporting = Glyphs.defaults["com.mekkablue.CompareGlyphHeightsOfFrontmostFonts.includeNonExporting"]
theseIDs = [m.id for m in thisFont.masters]
otherIDs = [m.id for m in otherFont.masters]
masters = zip(theseIDs, otherIDs)
collectedGlyphNames = []
if len(theseIDs) != len(otherIDs):
print(u"⚠️ Different number of masters in %s and %s" % (thisFont.filepath.lastPathComponent(), otherFont.filepath.lastPathComponent()))
for thisGlyph in thisFont.glyphs:
if thisGlyph.export or includeNonExporting:
glyphName = thisGlyph.name
otherGlyph = otherFont.glyphs[glyphName]
if not otherGlyph:
print(u"⚠️ %s: not in other font (%s)" % (glyphName, otherFileName))
else:
for idPair in masters:
thisID, otherID = idPair
thisLayer = thisGlyph.layers[thisID]
otherLayer = otherGlyph.layers[otherID]
if not (thisLayer and otherLayer):
print(u"⚠️ Cannot compare layers in %s" % glyphName)
else:
if heights:
thisHeight = thisLayer.bounds.origin.y + thisLayer.bounds.size.height
otherHeight = otherLayer.bounds.origin.y + otherLayer.bounds.size.height
if abs(thisHeight-otherHeight) > tolerate:
print(u"❌ %s heights: (1) %.1f, (2) %.1f" % (glyphName, thisHeight, otherHeight))
collectedGlyphNames.append(glyphName)
if depths:
thisDepth = thisLayer.bounds.origin.y
otherDepth = otherLayer.bounds.origin.y
if abs(thisDepth-otherDepth) > tolerate:
print(u"❌ %s depths: (1) %.1f, (2) %.1f" % (glyphName, thisDepth, otherDepth))
collectedGlyphNames.append(glyphName)
if not collectedGlyphNames:
Message(title="No significant differences", message="No differences larger than %.1f found between the two frontmost fonts. See the macro window for error messages."%tolerate, OKButton=u"😎 Cool")
else:
collectedGlyphNames = tuple(set(collectedGlyphNames))
tabText = "/"+"/".join(collectedGlyphNames)
thisFont.newTab(tabText)
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Compare Glyph Heights of Frontmost Fonts Error: %s" % e)
import traceback
print(traceback.format_exc())
CompareGlyphHeightsOfFrontmostFonts() | apache-2.0 | -8,828,711,297,698,775,000 | 48.046784 | 219 | 0.723706 | false |
rustychris/stompy | stompy/xr_utils.py | 1 | 13940 | import logging
log=logging.getLogger('xr_utils')
from collections import OrderedDict
import xarray as xr
import six
import numpy as np
def gradient(ds,varname,coord):
# rather than assume that consecutive data points are valid,
# fit a line to the data values per water column
daC,daz = xr.broadcast(ds[varname],ds[coord])
z=daz.values
C=daC.values
assert z.shape==C.shape
newdims=[dim for dim in daC.dims if dim!=coord]
newshape=[len(daC[dim]) for dim in newdims]
newdims,newshape
result=np.zeros(newshape,'f8')
for idx in np.ndindex(*newshape):
colC=C[idx]
colz=z[idx]
assert colC.ndim==colz.ndim==1 # sanity
valid=np.isfinite(colC*colz)
if np.sum(valid)>1:
mb=np.polyfit(colz[valid],colC[valid],1)
result[idx]=mb[0]
else:
result[idx]=np.nan
return xr.DataArray(result,coords=[ds[dim] for dim in newdims],name='d%s/d%s'%(varname,coord))
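# --- Illustrative usage sketch (added for documentation; not part of the original
# module). The variable/dimension names ('salt', 'cast', 'z') are hypothetical.
def _example_gradient():
    ds = xr.Dataset({'salt': (('cast', 'z'), np.array([[30., 31., 32.]]))},
                    coords={'cast': [0], 'z': [0., 1., 2.]})
    # Least-squares d(salt)/dz for each cast, skipping NaN samples in the column.
    return gradient(ds, 'salt', 'z')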
def find_var(nc,pred):
for fld in nc:
try:
if pred(nc[fld]):
return fld
except:
pass
return None
def redimension(ds,new_dims,
intragroup_dim=None,
inplace=False,
save_mapping=False):
"""
    Copy ds, making new_dims into the defining
    dimensions for variables which share shape with new_dims.
    Each entry in new_dims must be one-dimensional, and all
    entries must share the same dimension.
Example:
Dataset:
coordinates
sample [0,1,2..100]
data variables
date(sample) [...]
station(sample) [...]
depth(sample) [...]
salinity(sample) [...]
We'd like to end up with
salinity(date,station,profile_sample)
depth(date,station,profile_sample)
Or
Dataset:
coordinates
time [...]
item [...]
data variables
x(item) [...]
y(item) [...]
z(item) [...]
        salinity(time,item) [...,...]
Which you want to become
Dataset:
coordinates
time [.]
x [.]
y [.]
zi [.]
data variables
z(x,y,zi) [...]
salinity(time,x,y,zi) [....]
In other words, replace item with three orthogonal dimensions. Two of the
orthogonal dimensions have existing coordinates, and the third is an index
to elements within the bin defined by x,y.
save_mapping: create an additional variable in the output which stores the
mapping of the linear dimension to the new, orthogonal dimensions
intragroup_dim: introduce an additional dimension to enumerate the original
data which map to the same new coordinates.
"""
if not inplace:
ds=ds.copy()
lin_dim=ds[new_dims[0]].dims[0]# the original linear dimension
orig_dims=[ ds[vname].values.copy()
for vname in new_dims ]
Norig=len(orig_dims[0]) # length of the original, linear dimension
uni_new_dims=[ np.unique(od) for od in orig_dims]
for und in uni_new_dims:
try:
if np.any(und<0):
log.warning("New dimensions have negative values -- will continue but you probably want to drop those first")
except TypeError:
# some versions of numpy/xarray will not compare times to 0,
# triggering a TypeError
pass
    # note that this is just the shape that will replace occurrences of lin_dim
new_shape=[len(und) for und in uni_new_dims]
# build up an index array
new_idxs=[ np.searchsorted(und,od)
for und,od in zip( uni_new_dims, orig_dims ) ]
if intragroup_dim is not None:
# here we need to first count up the max number within each 'bin'
# so new_idxs
count_per_group=np.zeros(new_shape,'i4')
intra_idx=np.zeros(Norig,'i4')
for orig_idx,idxs in enumerate(zip(*new_idxs)):
intra_idx[orig_idx] = count_per_group[idxs]
count_per_group[ idxs ]+=1
n_intragroup=count_per_group.max() # 55 in the test case
# add in the new dimension
new_shape.append(n_intragroup)
new_idxs.append(intra_idx)
# negative means missing. at this point, intragroup_dim has not been taken care of
# mapper: array of the shape of the new dimensions, with each entry giving the linear
# index into the original dimension
mapper=np.zeros(new_shape,'i4') - 1
mapper[ tuple(new_idxs) ] = np.arange(Norig)
# install the new coordinates - first the grouped coordinates
for nd,und in zip(new_dims,uni_new_dims):
del ds[nd] # doesn't like replacing these in one go
ds[nd]= ( (nd,), und )
if intragroup_dim is not None:
# and second the new intragroup coordinate:
new_dims.append(intragroup_dim)
ds[intragroup_dim] = ( (intragroup_dim,), np.arange(n_intragroup) )
for vname in ds.data_vars:
if lin_dim not in ds[vname].dims:
# print("Skipping %s"%vname)
continue
# print(vname)
var_new_dims=[]
var_new_slice=[]
mask_slice=[]
for d in ds[vname].dims:
if d==lin_dim:
var_new_dims += new_dims
var_new_slice.append( mapper )
mask_slice.append( mapper<0 )
else:
var_new_dims.append(d)
var_new_slice.append(slice(None))
mask_slice.append(slice(None))
var_new_dims=tuple(var_new_dims)
var_new_slice=tuple(var_new_slice)
# this is time x nSegment
# ds[vname].values.shape # 10080,1494
# This is the beast: but now it's including some crap values at the beginning
new_vals=ds[vname].values[var_new_slice]
mask=np.zeros_like(new_vals,'b1')
mask[mask_slice] = True
new_vals=np.ma.array(new_vals,mask=mask)
old_attrs=OrderedDict(ds[vname].attrs)
# This seems to be dropping the attributes
ds[vname]=( var_new_dims, new_vals )
for k in old_attrs:
if k != '_FillValue':
ds[vname].attrs[k] = old_attrs[k]
if save_mapping:
ds['mapping']= ( new_dims, mapper)
return ds
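# --- Illustrative usage sketch (added for documentation; not part of the original
# module). A toy dataset regrouping a linear 'sample' dimension by date/station;
# the variable names are hypothetical.
def _example_redimension():
    ds = xr.Dataset({'date': ('sample', np.repeat([0, 1], 3)),
                     'station': ('sample', np.tile([10, 20, 30], 2)),
                     'salinity': ('sample', np.arange(6.0))})
    # salinity(sample) becomes salinity(date, station)
    return redimension(ds, ['date', 'station'])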
def sort_dimension(ds,sort_var,sort_dim,inplace=False):
"""
sort_var: variable whose value will be used to sort items along sort_dim.
sort_dim must be in sort_var.dims
only variables with dimensions the same or a superset of sort_var.dims
can/will be sorted.
"""
if not inplace:
ds=ds.copy()
#if ds[sort_var].ndim>1:
# the interesting case
# want to sort within each 'bin'
sort_var_dims=ds[sort_var].dims
sort_var_dimi = sort_var_dims.index(sort_dim)
new_order=ds[sort_var].argsort(axis=sort_var_dimi).values
# this only works for variables with a superset of sort_var's
# dimensions (or the same).
# i.e. depth(date,station,prof_sample)
# can't be used to sort a variable of (station,prof_sample)
# but it can be used to sort a variable of (analyte,date,station,prof_sample)
for v in ds.data_vars:
for d in sort_var_dims:
compat=True
if d not in ds[v].dims:
# print("%s not compatible with dimensions for sorting"%v)
compat=False
if not compat: continue
# build up transpose
trans_dims=[]
for d in ds[v].dims:
if d not in sort_var_dims:
trans_dims.append(d)
n_extra=len(trans_dims)
trans_dims+=sort_var_dims
orig_dims=ds[v].dims
tmp_trans=ds[v].transpose(*trans_dims)
vals=tmp_trans.values
# actually a tricky type of indexing to handle:
# new_order.shape: (23, 37, 52)
# luckily numpy knows how to do this relatively efficiently:
idxs=np.ix_( *[np.arange(N) for N in vals.shape] )
idxs=list(idxs)
idxs[n_extra+sort_var_dimi]=new_order
tmp_trans.values=vals[tuple(idxs)]
ds[v].values=tmp_trans.transpose(*orig_dims)
return ds
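# --- Illustrative usage sketch (added for documentation; not part of the original
# module). Sort each cast's samples by depth; the names and values are hypothetical.
def _example_sort_dimension():
    ds = xr.Dataset({'depth': (('cast', 'z'), np.array([[3., 1., 2.],
                                                        [2., 3., 1.]])),
                     'salt': (('cast', 'z'), np.array([[30., 10., 20.],
                                                       [20., 30., 10.]]))})
    # Reorders both depth and salt along 'z' so depth increases within each cast.
    return sort_dimension(ds, 'depth', 'z')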
def first_finite(da,dim):
# yecch.
valid=np.isfinite(da.values)
    dimi=da.get_axis_num(dim)
first_valid=np.argmax( valid, axis=dimi)
new_shape=[ slice(length)
for d,length in enumerate(da.shape)
if d!=dimi ]
indexers=np.ogrid[ tuple(new_shape) ]
indexers[dimi:dimi]=[first_valid]
da_reduced=da.isel(**{dim:0,'drop':True})
da_reduced.values=da.values[tuple(indexers)]
return da_reduced
# Helper for z_from_sigma
def decode_sigma(ds,sigma_v):
"""
ds: Dataset
sigma_v: sigma coordinate variable.
return DataArray of z coordinate implied by sigma_v
"""
import re
formula_terms=sigma_v.attrs['formula_terms']
terms={}
for hit in re.findall(r'\s*(\w+)\s*:\s*(\w+)', formula_terms):
terms[hit[0]]=ds[hit[1]]
# this is where xarray really shines -- it will promote z to the
# correct dimensions automatically, by name
# This ordering of the multiplication puts laydim last, which is
# assumed in some other [fragile] code.
# a little shady, but its helpful to make the ordering here intentional
z=(terms['eta'] - terms['bedlevel'])*terms['sigma'] + terms['bedlevel']
return z
def z_from_sigma(dataset,variable,interfaces=False,dz=False):
"""
Create a z coordinate for variable as a Dataset from the given dataset
interfaces: False => do nothing related to layer boundaries
variable name => use the given variable to define interfaces between layers.
True => try to infer the variable, fallback to even spacing otherwise.
if interfaces is anything other than False, then the return value will be a Dataset
with the centers in a 'z_ctr' variable and the interfaces in a 'z_int'
dz: implies interfaces, and includes a z_dz variable giving thickness of each layer.
"""
da=dataset[variable]
da_dims=da.dims
if dz:
assert interfaces is not False,"Currently must enable interfaces to get thickness dz"
# Variables which are definitely sigma, and might be the one we're looking for
sigma_vars=[v for v in dataset.variables
if dataset[v].attrs.get('standard_name',None) == 'ocean_sigma_coordinate']
# xr data arrays
sigma_ctr_v=None # sigma coordinate for centers
sigma_int_v=None # sigma coordinate for interfaces
for v in sigma_vars:
if set(dataset[v].dims)<=set(da_dims):
assert sigma_ctr_v is None,"Multiple matches for layer center sigma coordinate"
sigma_ctr_v=dataset[v]
assert sigma_ctr_v is not None,"Failed to find a layer-center sigma coordinate"
# With the layer center variable known, can search for layer interfaces
if interfaces is False:
pass
else:
if interfaces is True:
maybe_int_vars=sigma_vars
else:
# Even when its specified, check to see that it has the expected form
maybe_int_vars=[interfaces]
for v in maybe_int_vars:
ctr_dims=set(sigma_ctr_v.dims)
int_dims=set(dataset[v].dims)
ctr_only = list(ctr_dims - int_dims)
int_only = list(int_dims - ctr_dims)
if (len(ctr_only)!=1) or (len(int_only)!=1):
continue
if len(dataset[ctr_only[0]])+1==len(dataset[int_only[0]]):
assert sigma_int_v is None,"Multiple matches for layer interface sigma coordinate"
sigma_int_v=dataset[v]
z_ctr=decode_sigma(dataset,sigma_ctr_v)
if sigma_int_v is not None:
z_int=decode_sigma(dataset,sigma_int_v)
result=xr.Dataset()
result['z_ctr']=z_ctr
if interfaces is not False:
result['z_int']=z_int
if dz is not False:
dz=xr.ones_like( z_ctr )
dz.values[...]=np.diff( z_int, axis=z_int.get_axis_num(int_only[0]))
result['z_dz']=dz
return result
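# --- Illustrative usage sketch (added for documentation; not part of the original
# module). The names ('eta', 'bedlevel', 'salt') are hypothetical but follow the
# formula_terms convention expected above, with sigma=0 at the bed and 1 at the surface.
def _example_z_from_sigma():
    ds = xr.Dataset()
    ds['sigma'] = (('layer',), np.array([0.25, 0.75]),
                   {'standard_name': 'ocean_sigma_coordinate',
                    'formula_terms': 'sigma: sigma eta: eta bedlevel: bedlevel'})
    ds['eta'] = (('time',), np.array([0.5]))
    ds['bedlevel'] = xr.DataArray(-10.0)
    ds['salt'] = (('time', 'layer'), np.array([[30.0, 31.0]]))
    # Returns a Dataset with z_ctr(time, layer): the elevation of each layer center.
    return z_from_sigma(ds, 'salt')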
def bundle_components(ds,new_var,comp_vars,frame,comp_names=None):
"""
ds: Dataset
new_var: name of the vector-valued variable to create
comp_vars: list of variables, one-per component
frame: name to give the component dimension, i.e. the name of the
reference frame
comp_names: list same length as comp_vars, used to name the components.
"""
vector=xr.concat([ds[v] for v in comp_vars],dim=frame)
# That puts xy as the first dimension, but I'd rather it last
dims=vector.dims
roll_dims=dims[1:] + dims[:1]
ds[new_var]=vector.transpose( *roll_dims )
if comp_names is not None:
ds[frame]=(frame,),comp_names
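# --- Illustrative usage sketch (added for documentation; not part of the original
# module). 'u'/'v' east and north velocity components are hypothetical names.
def _example_bundle_components():
    ds = xr.Dataset({'u': ('time', np.array([1.0, 2.0])),
                     'v': ('time', np.array([0.5, 0.25]))})
    bundle_components(ds, 'velocity', ['u', 'v'], 'xy', ['E', 'N'])
    # ds now has velocity(time, xy), with ds['xy'] labeling the components.
    return ds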
def concat_permissive(srcs,**kw):
"""
Small wrapper around xr.concat which fills in nan
coordinates where they are missing, in case some
of the incoming datasets have more metadata than others.
"""
extra_coords=set()
for src in srcs:
extra_coords |= set(src.coords)
expanded_srcs=[]
for src in srcs:
for extra in extra_coords:
if extra not in src:
src=src.assign_coords(**{extra:np.nan})
expanded_srcs.append(src)
return xr.concat(expanded_srcs,**kw)
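# --- Illustrative usage sketch (added for documentation; not part of the original
# module). Only the first dataset carries a 'station' coordinate.
def _example_concat_permissive():
    a = xr.Dataset({'temp': ('time', np.array([12.0]))}, coords={'station': 'A'})
    b = xr.Dataset({'temp': ('time', np.array([13.0]))})
    # 'station' is filled with nan for b rather than tripping up xr.concat.
    return concat_permissive([a, b], dim='time')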
def structure_to_dataset(arr,dim,extra={}):
"""
Convert a numpy structure array to a dataset.
arr: structure array.
dim: name of the array dimension. can be a tuple with multiple dimension
names if arr.ndim>1.
extra: dict optionally mapping specific fields to additional dimensions
within that field.
"""
if isinstance(dim,six.string_types):
dim=(dim,)
ds=xr.Dataset()
for fld in arr.dtype.names:
if fld in extra:
extra_dims=extra[fld]
else:
extra_dims=['d%02d'%d for d in arr[fld].shape[1:]]
ds[fld]=dim+tuple(extra_dims),arr[fld]
return ds
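# --- Illustrative usage sketch (added for documentation; not part of the original
# module). The structure array and field names are hypothetical.
def _example_structure_to_dataset():
    arr = np.zeros(3, dtype=[('x', 'f8'), ('vel', 'f8', (2,))])
    # 'vel' gets an explicit second dimension name instead of an autogenerated one.
    return structure_to_dataset(arr, 'sample', extra={'vel': ['xy']})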
| mit | -1,026,334,121,185,740,400 | 31.877358 | 125 | 0.611047 | false |
helixyte/everest | everest/tests/test_attributes.py | 1 | 1306 | """
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Oct 16, 2013.
"""
import pytest
from everest.attributes import get_attribute_cardinality
from everest.attributes import is_terminal_attribute
from everest.constants import CARDINALITY_CONSTANTS
from everest.resources.base import Member
from everest.resources.descriptors import member_attribute
from everest.resources.descriptors import terminal_attribute
__docformat__ = 'reStructuredText en'
__all__ = ['TestAttributes',
]
class TestAttributes(object):
@pytest.mark.parametrize('attr_name', ['attr'])
def test_get_attribute_cardinality(self, attr_name):
mb_attr = member_attribute(Member, attr_name)
assert get_attribute_cardinality(mb_attr) == CARDINALITY_CONSTANTS.ONE
t_attr = terminal_attribute(int, attr_name)
with pytest.raises(ValueError):
get_attribute_cardinality(t_attr)
@pytest.mark.parametrize('attr_name', ['attr'])
def test_is_terminal_attribute(self, attr_name):
mb_attr = member_attribute(Member, attr_name)
assert is_terminal_attribute(mb_attr) is False
t_attr = terminal_attribute(int, attr_name)
assert is_terminal_attribute(t_attr) is True
| mit | -6,292,481,921,457,133,000 | 35.277778 | 78 | 0.726646 | false |
DataONEorg/d1_python | test_utilities/src/d1_test/__init__.py | 1 | 1672 | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DataONE Test Utilities.
The :doc:`/d1_test/index` package contains various utilities for testing DataONE
infrastructure components and clients. These include:
:doc:`Instance Generator </d1_test/instance_generator/index>`: Used for creating
randomized System Metadata documents
:doc:`Stress Tester </d1_test/stress_tester/index>`: Used for stress testing of Member
Node implementations. The stress_tester creates a configurable number of concurrent
connections to a Member Node and populates the MN with randomly generated objects while
running queries and object retrievals.
:doc:`Utilities </d1_test/utilities/index>`: Misc test utilities.
"""
# Suppress log messages instead of raising exception if the program using the library
# does not configure the logging system.
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
| apache-2.0 | 5,586,200,502,569,184,000 | 37.883721 | 87 | 0.7811 | false |
etsinko/Pentaho-reports-for-OpenERP | openerp_addon/pentaho_reports/__openerp__.py | 1 | 9063 | # -*- encoding: utf-8 -*-
{
"name": "Pentaho Reports for OpenERP",
"description": """
Pentaho - OpenERP Report Integration by Willow IT
=================================================
This module integrates Pentaho's reporting engine with OpenERP.
This project was developed by Willow IT, using the libraries and extensions developed by De Bortoli Wines,
Australia (Pieter van der Merwe in particular) for the Pentaho reporting system. The OpenERP addon also derives
from and/or is inspired by the Jasper Reports addon developed by NaN-tic.
Willow IT contributions:
* Deepak Seshadri - OpenERP-Pentaho server connector (Java)
* Richard deMeester - frontend and core functions, automated wizard and action implementation
* Douglas Parker - additional integration
* Jon Wilson - inspiration, testing, and whipping
Report Designer and Java Component
----------------------------------
For notes on installing and running the report designer, 3.9, refer to:
**http://bit.ly/L4wPoC**
Report designer version 5.0 and higher includes the OpenERP datasource. It can be installed and used without the
need for additional components.
The README document contains instructions relevant to building and running the java component.
Report Types
------------
Broadly speaking, two types of data sources can be used to produce the reports. OpenERP object data sources or SQL
query data sources.
* Object data sources have the advantage that they can use OpenERP model columns, even those that are not stored
in the database. This includes functional fields, related fields, properties. They can iterate through one2many
and many2many subfields. They also respect OpenERP record rules.
* SQL data sources have the advantage that they allow greater flexibility in selections, and other SQL features
like grouping and selective inclusion of sub-queries. It also allows selection where no OpenERP model
relationship exists. It does not respect OpenERP record rules, which may be seen as an advantage for creating
summary type reports which may be run by users who may not be able to view low-level data. Because of this, you
need to be careful.
Report Parameters
-----------------
Prompted and hidden parameters are supported, as are mandatory values.
A number of reserved parameters are automatically passed, but the report needs to define these parameters in order
to receive and use them. Reserved parameters currently passed are:
* *ids*: the context ids when the report action is called. This is almost always needed for object based reports
to be meaningful.
* *user_id*: the id of the user running the report.
* *user_name*: the display name of the user running the report.
* *context_lang*: the language in effect when the report action is executed.
* *context_tz*: the timezone in effect when the report action is executed.
    This list of reserved parameters is available for use in all data sources, even those which return possible
parameter selections, therefore allowing meaningful selections for other parameters.
Most Pentaho report parameter types and features have been mapped, where practicable, to something which makes
    sense to an OpenERP environment. This means that a number of Java data types are not differentiated:
    for example, Integer / Long / Short / BigInteger et al will all be treated as integers.
Default values can be passed for the parameters, and may default value formulae work.
Hidden parameters must obviously receive and work with a default value of some sort. This default can be the
Pentaho default, or can be sent to the report in the context in the format:
**{'pentaho_defaults': { .... }}**
where the passed dictionary contains the parameter names as keys. See below for guidance on where to set this up.
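    For example, with hypothetical parameter names (they must match parameters defined in the prpt report):
    **{'pentaho_defaults': {'date_from': '2014-01-01', 'company_id': 1}}**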
Pentaho Display Types have been consolidated. Drop Down, Single Value List, etc, all display as OpenERP selection
pull downs. Some Pentaho multi value selection types, such as Check Box, etc, are implemented as single value
selection pull downs. Date Picker uses the standard OpenERP date/time widget. Pentaho multi value lists are
implemented as many2manytag widgets, and support integer, string, and numeric data types.
Other Pentaho parameter features should be considered unsupported, such as Post-Processing Formula, Display Value
Formula, Visible Items, Validate Values, et al.
Setup
-----
Some parameters may be required in Settings/Customization/System Parameters.
The address of the Pentaho server which is used to render the report is defined with the parameter:
**pentaho.server.url**
For object based data sources, the Pentaho server will use XML-RPC to connect to the current database using the
interface and port as defined in the OpenERP config file, and the reporting user's credentials.
If not defined in the config file, the interface and port can be defined with the parameters:
* pentaho.openerp.xml.interface
* pentaho.openerp.xml.port
For SQL query based data sources, the Pentaho server will use the following parameters:
* pentaho.postgres.host
* pentaho.postgres.port
* pentaho.postgres.login
* pentaho.postgres.password
Report Actions
--------------
Reports are defined to OpenERP under **Settings/Technical/Low Level Objects/Actions/Reports**. This is the place
where standard OpenERP report actions are defined. Selecting the appropriate checkbox can convert existing report
actions to Pentaho Report Actions.
Reports can be handled by OpenERP in two ways. They can be linked to a menu, or they can be linked to a model.
* Model linked reports will appear in the right hand toolbar as per standard reports, or they can be specifically
called with an action, such as a button. A record or number of records needs to be selected before the action
will be invoked, and the ids of the selected records will be passed to the report as a list parameter called
"ids". A report invoked this way will not prompt for any additional parameters. A front end custom wizard can be
created if desired to invoke the action, but that wizard and the report would be very closely tied and need to
be developed in conjunction with one other.
* Menu linked reports will appear somewhere in a menu. They will pop up a window prompting for a report output
type, and any additional (visible) parameters that are defined. Object ids passed to these reports are not
meaningful, as no object or ids are guaranteed to be in effect when the action is called, so selection of data
needs to be based on other parameters. Other reserved parameters, such as user id, may be meaningful in the
context of prompting for parameters or ultimate data selection.
Report actions can override existing OpenERP actions (such as invoice prints) or can be new actions.
The service name is only important if it is overriding an existing service, or if the report is to be invoked from
coded actions like buttons or wizards. For menu linked actions or generic object linked actions, the service name
is incidental.
Output types specified here are defaults, and can be overridden by either a custom wizard or by menu linked
reports. They will not be changeable for model linked reports which don't have specific coding.
The prpt (Pentaho report definition) file selected is stored in the database. Changing the report using the
designer will require the report to be re-loaded.
A prpt file and action can easily be defined as part of a module and distributed this way. Be aware, though, if
the module is upgraded from within OpenERP it could potentially reload the distributed report and may lose
changes that were uploaded manually after module installation.
Security groups entered against the action will be respected in regard to action visibility - they play no role in
the report execution.
If a context value is required to set a default value, it needs to be set against the created action. It comes up
under Settings/Customization/Low Level Actions/Actions/Window Actions. It will already have a Context Value with
the service_name defined, which should be left intact.
Disclaimer
----------
This has been developed over time to meet specific requirements as we have needed to meet them. If something is
wrong, or you think would make a great feature, please do let us know at:
**[email protected]**
Library
-------
We will be endeavouring to create a library of sample and useful reports. Check at:
**http://www.willowit.com.au/**
where we will announce when and where this is available. In the meantime, if you develop any reports or templates
that you would consider worth sharing, please email them through with some description or details.
""",
"version": "0.1",
"author": "WillowIT Pty Ltd",
"website": "http://www.willowit.com.au/",
"depends": ["base"],
"category": "Reporting subsystems",
"data": [
"report_xml_view.xml",
'wizard/report_prompt.xml',
'data/config_data.xml',
],
"installable": True,
"active": False
}
| gpl-2.0 | -8,140,916,819,647,359,000 | 47.989189 | 114 | 0.766965 | false |
googlearchive/titan | titan/files/mixins/stats_recorder.py | 1 | 5131 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enable Titan Stats recording of all core file operations."""
import random
from titan import files
from titan import stats
class StatsRecorderMixin(files.File):
"""Mixin which uses Titan Stats to record usage and timing stats."""
def __init__(self, *args, **kwargs):
self._stats_invocation_counters = {}
self._stats_latency_counters = {}
super(StatsRecorderMixin, self).__init__(*args, **kwargs)
def _start_stats_recording(self, unique_counter_name):
counter_name = unique_counter_name.name
latency_counter_name = '%s/latency' % counter_name
latency_counter = stats.AverageTimingCounter(latency_counter_name)
latency_counter.start()
unique_id = hash(unique_counter_name)
self._stats_latency_counters[unique_id] = latency_counter
self._stats_invocation_counters[unique_id] = stats.Counter(counter_name)
def _stop_stats_recording(self, unique_counter_name):
unique_id = hash(unique_counter_name)
self._stats_latency_counters[unique_id].stop()
self._stats_invocation_counters[unique_id].increment()
counters = [
self._stats_invocation_counters[unique_id],
self._stats_latency_counters[unique_id],
]
stats.log_counters(counters, counters_func=make_all_counters)
@property
def _file(self):
if self.is_loaded:
# Only record stats if not loaded.
return super(StatsRecorderMixin, self)._file
unique_counter_name = _UniqueCounterName('files/File/load')
self._start_stats_recording(unique_counter_name)
try:
return super(StatsRecorderMixin, self)._file
finally:
self._stop_stats_recording(unique_counter_name)
def write(self, *args, **kwargs):
unique_counter_name = _UniqueCounterName('files/File/write')
self._start_stats_recording(unique_counter_name)
try:
return super(StatsRecorderMixin, self).write(*args, **kwargs)
finally:
self._stop_stats_recording(unique_counter_name)
def copy_to(self, *args, **kwargs):
unique_counter_name = _UniqueCounterName('files/File/copy_to')
self._start_stats_recording(unique_counter_name)
try:
return super(StatsRecorderMixin, self).copy_to(*args, **kwargs)
finally:
self._stop_stats_recording(unique_counter_name)
def delete(self, *args, **kwargs):
unique_counter_name = _UniqueCounterName('files/File/delete')
self._start_stats_recording(unique_counter_name)
try:
return super(StatsRecorderMixin, self).delete(*args, **kwargs)
finally:
self._stop_stats_recording(unique_counter_name)
def serialize(self, *args, **kwargs):
unique_counter_name = _UniqueCounterName('files/File/serialize')
self._start_stats_recording(unique_counter_name)
try:
return super(StatsRecorderMixin, self).serialize(*args, **kwargs)
finally:
self._stop_stats_recording(unique_counter_name)
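# --- Illustrative usage sketch (added for documentation; not part of the original
# module). StatsRecorderMixin already derives from files.File, so a recorded file
# class can simply be:
#
#   class RecordedFile(StatsRecorderMixin):
#       """File whose load/write/copy_to/delete/serialize calls emit Titan counters."""
#
# Constructor arguments are whatever titan.files.File expects; they are passed
# straight through by StatsRecorderMixin.__init__.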
class _UniqueCounterName(object):
"""A unique counter name container.
This object's hash is used to prevent overlap of the same counter name
which may be created multiple times within a code path.
"""
def __init__(self, name):
self.random_offset = random.randint(0, 1000000)
self.name = name
def __hash__(self):
return id(self) + self.random_offset
def make_all_counters():
"""Make a new list of all counters which can be aggregated and saved."""
counters = [
# Invocation counters.
stats.Counter('files/File/load'),
stats.Counter('files/File/write'),
stats.Counter('files/File/delete'),
stats.Counter('files/File/serialize'),
# TODO(user): Add these when the new APIs are implemented.
# stats.Counter('files/Files/list'),
# stats.Counter('files/File/copy'),
# stats.Counter('files/Dir/copy'),
# stats.Counter('files/Dir/list'),
# stats.Counter('files/Dir/exists'),
# Timing counters.
stats.AverageTimingCounter('files/File/load/latency'),
stats.AverageTimingCounter('files/File/write/latency'),
stats.AverageTimingCounter('files/File/delete/latency'),
stats.AverageTimingCounter('files/File/serialize/latency'),
# TODO(user): Add these when the new APIs are implemented.
# stats.AverageTimingCounter('files/Files/list/latency'),
# stats.AverageTimingCounter('files/File/copy/latency'),
# stats.AverageTimingCounter('files/Dir/copy/latency'),
# stats.AverageTimingCounter('files/Dir/list/latency'),
# stats.AverageTimingCounter('files/Dir/exists/latency'),
]
return counters
| apache-2.0 | 4,395,640,276,062,918,700 | 35.913669 | 76 | 0.701228 | false |
abhishekdepro/Mantle | first_time.py | 1 | 2523 | import urllib2
import re
import datetime
import sqlite3
a=0
city=raw_input('enter city')
print("")
print("Entered city: "+city.upper())
print "Crunching the latest data",
for i in range(0,100):
a+=2
if(i%10==0):
print ".",
print("")
print "Searching for alternative sources",
for i in range(0,100):
a+=2
if(i%10==0):
print ".",
print("")
print "Post processing data",
for i in range(0,100):
a+=2
if(i%10==0):
print ".",
print("")
str1="&type=accurate&mode=xml&units=metric&cnt=2"
str2=city+str1
url = "http://api.openweathermap.org/data/2.5/forecast/daily?q=%s" % str2
source=urllib2.urlopen(url)
tomorrow=str(datetime.date.today()+datetime.timedelta(days=1))
htmltext=source.read()
print("<------------------------WEATHER REPORT: "+city.upper()+" for "+tomorrow+" ------------>")
# search for pattern using regular expressions (.+?)
#if(htmltext.find(adate)!=-1):
wind='<windDirection deg="(.+?)" code="(.+?)" name="(.+?)"/>'
temp='<temperature day="(.+?)" min="(.+?)" max="(.+?)" night="(.+?)" eve="(.+?)" morn="(.+?)"/>'
condition='<symbol number="(.+?)" name="(.+?)" var="(.+?)"/>'
pattern_wind=re.compile(wind)
pattern_temp=re.compile(temp)
pattern_cond=re.compile(condition)
try:
# match pattern with htmltext
weather_winddirection=re.findall(pattern_wind,htmltext)
weather_temp=re.findall(pattern_temp,htmltext)
weather_cond=re.findall(pattern_cond,htmltext)
conn = sqlite3.connect(':memory:')
conn = sqlite3.connect("D://Programming/weather_forecast.db")
c=conn.cursor()
print "Overall Weather status: ",weather_cond[1][1]
print "Temperature @Morning: ",weather_temp[1][5]
print "Temperature @Day: ",weather_temp[1][0]
print "Temperature @Evening: ",weather_temp[1][4]
print "Temperature @Night: ",weather_temp[1][3]
print ""
print "Max Temperature: ",weather_temp[1][2]
print "Min Temperature: ",weather_temp[1][1]
print ""
if(weather_winddirection[1][2]!=""):
print "Wind Direction: ",weather_winddirection[1][2]
# Create table
c.execute('''CREATE TABLE data
(city text, tomorrow text, weathercond text, morning text, day text, evening text, night text)''')
# Insert a row of data
c.execute("INSERT INTO data VALUES (?,?,?,?,?,?,?)", (city,tomorrow,weather_cond[1][1],weather_temp[1][5],weather_temp[1][0],weather_temp[1][4],weather_temp[1][3]))
# Save (commit) the changes
conn.commit()
conn.close()
except Exception:
print "Data unavailable!"
| apache-2.0 | -2,885,661,126,470,217,700 | 32.197368 | 168 | 0.632184 | false |
venicegeo/eventkit-cloud | eventkit_cloud/api/views.py | 1 | 86169 | """Provides classes for handling API requests."""
import logging
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.core.cache import cache
from dateutil import parser
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.db import transaction
from django.db.models import Q
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
from django_filters.rest_framework import DjangoFilterBackend
from notifications.models import Notification
from rest_framework import filters, permissions, status, views, viewsets, mixins
from rest_framework.decorators import action
from rest_framework.exceptions import APIException, NotFound, PermissionDenied
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from audit_logging.models import AuditEvent
from eventkit_cloud.api.filters import (
ExportRunFilter,
JobFilter,
UserFilter,
GroupFilter,
UserJobActivityFilter,
LogFilter,
)
from eventkit_cloud.api.pagination import LinkHeaderPagination
from eventkit_cloud.api.permissions import IsOwnerOrReadOnly
from eventkit_cloud.api.renderers import (
HOTExportApiRenderer,
PlainTextRenderer,
)
from eventkit_cloud.api.serializers import (
ExportFormatSerializer,
ExportRunSerializer,
ProjectionSerializer,
ExportTaskRecordSerializer,
JobSerializer,
RegionMaskSerializer,
DataProviderTaskRecordSerializer,
RegionSerializer,
ListJobSerializer,
ProviderTaskSerializer,
DataProviderSerializer,
LicenseSerializer,
UserDataSerializer,
GroupSerializer,
UserJobActivitySerializer,
NotificationSerializer,
GroupUserSerializer,
AuditEventSerializer,
DataProviderRequestSerializer,
SizeIncreaseRequestSerializer,
FilteredDataProviderSerializer,
FilteredDataProviderTaskRecordSerializer,
)
from eventkit_cloud.api.validators import validate_bbox_params, validate_search_bbox
from eventkit_cloud.core.helpers import (
sendnotification,
NotificationVerb,
NotificationLevel,
)
from eventkit_cloud.core.models import (
GroupPermission,
GroupPermissionLevel,
annotate_users_restricted,
attribute_class_filter,
annotate_groups_restricted,
)
from eventkit_cloud.jobs.models import (
ExportFormat,
Projection,
Job,
Region,
RegionMask,
DataProvider,
DataProviderTask,
DatamodelPreset,
License,
VisibilityState,
UserJobActivity,
JobPermission,
JobPermissionLevel,
)
from eventkit_cloud.tasks.export_tasks import (
pick_up_run_task,
cancel_export_provider_task,
)
from eventkit_cloud.tasks.models import (
ExportRun,
ExportTaskRecord,
DataProviderTaskRecord,
prefetch_export_runs,
)
from eventkit_cloud.tasks.task_factory import (
create_run,
get_invalid_licenses,
InvalidLicense,
Error,
)
from eventkit_cloud.user_requests.models import DataProviderRequest, SizeIncreaseRequest
from eventkit_cloud.utils.gdalutils import get_area
from eventkit_cloud.utils.provider_check import perform_provider_check
from eventkit_cloud.utils.stats.aoi_estimators import AoiEstimator
from eventkit_cloud.utils.stats.geomutils import get_estimate_cache_key
# Get an instance of a logger
logger = logging.getLogger(__name__)
# controls how api responses are rendered
renderer_classes = (JSONRenderer, HOTExportApiRenderer)
ESTIMATE_CACHE_TIMEOUT = 600
class JobViewSet(viewsets.ModelViewSet):
"""
Main endpoint for export creation and management. Provides endpoints
for creating, listing and deleting export jobs.
Updates to existing jobs are not supported as exports can be cloned.
Request data can be posted as either `application/x-www-form-urlencoded` or `application/json`.
**Request parameters**:
* name (required): The name of the export.
* description (required): A description of the export.
* event: The project or event associated with this export, eg Nepal Activation.
* xmin (required): The minimum longitude coordinate.
* ymin (required): The minimum latitude coordinate.
* xmax (required): The maximum longitude coordinate.
* ymax (required): The maximum latitude coordinate.
* formats (required): One of the supported export formats ([html](/api/formats) or [json](/api/formats.json)).
* Use the format `slug` as the value of the formats parameter, eg `formats=thematic&formats=shp`.
* min_zoom: The minimum zoom level for your export on a per provider basis.
* max_zoom: The maximum zoom level for your export on a per provider basis.
* preset: One of the published preset files ([html](/api/configurations) or [json](/api/configurations.json)).
* Use the `uid` as the value of the preset parameter, eg `preset=eed84023-6874-4321-9b48-2f7840e76257`.
* If no preset parameter is provided, then the default HDM tags will be used for the export.
    * visibility: PUBLIC, PRIVATE or SHARED
* Unpublished exports will be purged from the system 48 hours after they are created.
"""
serializer_class = JobSerializer
permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
parser_classes = (JSONParser,)
lookup_field = "uid"
pagination_class = LinkHeaderPagination
filter_backends = (DjangoFilterBackend, filters.SearchFilter)
filter_class = JobFilter
search_fields = (
"name",
"description",
"visibility",
"event",
"user__username",
"region__name",
)
def dispatch(self, request, *args, **kwargs):
return viewsets.ModelViewSet.dispatch(self, request, *args, **kwargs)
def get_queryset(self):
"""Return all objects user can view."""
jobs = JobPermission.userjobs(self.request.user, JobPermissionLevel.READ.value)
return Job.objects.filter(Q(visibility=VisibilityState.PUBLIC.value) | Q(pk__in=jobs))
def list(self, request, *args, **kwargs):
"""
List export jobs.
The list of returned exports can be filtered by the **filters.JobFilter**
and/or by a bounding box extent.
Args:
request: the HTTP request.
*args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.
Returns:
A serialized collection of export jobs.
Uses the **serializers.ListJobSerializer** to
return a simplified representation of export jobs.
Raises:
ValidationError: if the supplied extents are invalid.
"""
params = self.request.query_params.get("bbox", None)
if params is None:
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = ListJobSerializer(page, many=True, context={"request": request})
return self.get_paginated_response(serializer.data)
else:
serializer = ListJobSerializer(queryset, many=True, context={"request": request})
return Response(serializer.data)
if len(params.split(",")) < 4:
raise ValidationError(code="missing_bbox_parameter", detail="Missing bounding box parameter")
else:
extents = params.split(",")
data = {
"xmin": extents[0],
"ymin": extents[1],
"xmax": extents[2],
"ymax": extents[3],
}
try:
bbox_extents = validate_bbox_params(data)
bbox = validate_search_bbox(bbox_extents)
queryset = self.filter_queryset(Job.objects.filter(the_geom__within=bbox))
page = self.paginate_queryset(queryset)
if page is not None:
serializer = ListJobSerializer(page, many=True, context={"request": request})
return self.get_paginated_response(serializer.data)
else:
serializer = ListJobSerializer(queryset, many=True, context={"request": request})
return Response(serializer.data)
except ValidationError as e:
logger.debug(e.detail)
raise ValidationError(code="validation_error", detail=e.detail)
def create(self, request, *args, **kwargs):
"""
Create a Job from the supplied request data.
The request data is validated by *api.serializers.JobSerializer*.
        Associates the *Job* with the required *ExportFormats* and *ExportConfig*.
* request: the HTTP request in JSON.
Example:
{
"name" : "Example Name",
"description" : "Example Description",
"event" : "Example Event (Project)",
"include_zipfile" : true,
"selection": { ... valid geojson ... },
"tags" : [],
"projections" : [4326],
"provider_tasks" : [{
"provider" : "osm",
"formats" : ["shp", "gpkg"]
}
]
}
        To monitor the resulting export run, retrieve the `uid` value from the returned json
and call /api/runs?job_uid=[the returned uid]
* Returns: the newly created Job instance.
Example:
{
"provider_tasks": [
{
"provider": "osm",
"formats": [
"gpkg"
]
}
],
"uid": "cf9c038c-a09a-4058-855a-b0b1d5a6c5c4",
"url": "http://cloud.eventkit.test/api/jobs/cf9c038c-a09a-4058-855a-b0b1d5a6c5c4",
"name": "test",
"description": "test",
"event": "test",
"created_at": "2017-03-10T15:09:29.802364Z",
"owner": "admin",
"exports": [
{
"formats": [
{
"uid": "167fbc03-83b3-41c9-8034-8566257cb2e8",
"url": "http://cloud.eventkit.test/api/formats/gpkg",
"slug": "gpkg",
"name": "Geopackage",
"description": "GeoPackage"
}
],
"provider": "OpenStreetMap Tiles"
}
],
"configurations": [],
"visibility" : "PRIVATE",
"feature_save": false,
"feature_pub": false,
"region": null,
"extent": {
"type": "Feature",
"properties": {
"uid": "cf9c038c-a09a-4058-855a-b0b1d5a6c5c4",
"name": "test"
},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[
-43.248281,
-22.816694
],
[
-43.248281,
-22.812105
],
[
-43.242617,
-22.812105
],
[
-43.242617,
-22.816694
],
[
-43.248281,
-22.816694
]
]
]
}
},
"tags": [
{
"key": "highway",
"value": "path",
"data_model": "HDM",
"geom_types": [
"line"
]
}
],
"include_zipfile": false
}
* Raises: ValidationError: in case of validation errors.
        * Returns: a non-202 response in case of errors.
"""
from eventkit_cloud.tasks.task_factory import InvalidLicense, Unauthorized
serializer = self.get_serializer(data=request.data)
if serializer.is_valid(raise_exception=True):
"""Get the required data from the validated request."""
export_providers = request.data.get("export_providers", [])
provider_tasks = request.data.get("provider_tasks", [])
projections = request.data.get("projections", [])
tags = request.data.get("tags")
preset = request.data.get("preset")
with transaction.atomic():
if export_providers:
for ep in export_providers:
ep["user"] = request.user.id
provider_serializer = DataProviderSerializer(
data=export_providers, many=True, context={"request": request}
)
if provider_serializer.is_valid():
provider_serializer.save()
if len(provider_tasks) > 0:
"""Save the job and make sure it's committed before running tasks."""
try:
job = serializer.save()
provider_serializer = ProviderTaskSerializer(
data=provider_tasks, many=True, context={"request": request}
)
try:
provider_serializer.is_valid(raise_exception=True)
job.provider_tasks.add(*provider_serializer.save())
job.save()
except ValidationError:
raise ValidationError(
code="invalid_provider_task", detail="A provider and an export format must be selected."
)
# Check max area (skip for superusers)
if not self.request.user.is_superuser:
error_data = {"errors": []}
for provider_task in job.provider_tasks.all():
provider = provider_task.provider
bbox = job.extents
srs = "4326"
cache_key = get_estimate_cache_key(
bbox, srs, provider_task.min_zoom, provider_task.max_zoom, provider.slug
)
# find cache key that contains the estimator hash with correct time, size values
size, time = cache.get(cache_key, (None, None))
max_selection = provider.get_max_selection_size(self.request.user)
max_data_size = provider.get_max_data_size(self.request.user)
# Don't rely solely on max_data_size as estimates can sometimes be inaccurate
# Allow user to get a job that passes max_data_size or max_selection condition:
if size and max_data_size is not None:
# max_data_size is an optional configuration
if size <= max_data_size:
continue
else:
status_code = status.HTTP_400_BAD_REQUEST
error_data["errors"] += [
{
"status": status_code,
"title": _("Estimated size too large"),
"detail": _(
f"The estimated size "
f"exceeds the maximum data size for the {provider.name}"
),
}
]
if max_selection and 0 < float(max_selection) < get_area(job.the_geom.geojson):
status_code = status.HTTP_400_BAD_REQUEST
error_data["errors"] += [
{
"status": status_code,
"title": _("Selection area too large"),
"detail": _(f"The selected area is too large for the {provider.name}"),
}
]
if error_data["errors"]:
return Response(error_data, status=status_code)
if preset:
"""Get the tags from the uploaded preset."""
logger.debug("Found preset with uid: {0}".format(preset))
job.json_tags = preset
job.save()
elif tags:
"""Get tags from request."""
simplified_tags = []
for entry in tags:
tag = {
"key": entry["key"],
"value": entry["value"],
"geom": entry["geom_types"],
}
simplified_tags.append(tag)
job.json_tags = simplified_tags
job.save()
else:
"""
Use hdm preset as default tags if no preset or tags
are provided in the request.
"""
hdm_default_tags = DatamodelPreset.objects.get(name="hdm").json_tags
job.json_tags = hdm_default_tags
job.save()
except Exception as e:
logger.error(e)
raise
else:
# TODO: Specify which provider task is invalid.
raise ValidationError(
code="invalid_provider_task",
detail=f"One or more provider tasks are invalid: {provider_tasks}.",
)
try:
projection_db_objects = Projection.objects.filter(srid__in=projections)
job.projections.add(*projection_db_objects)
job.save()
except Exception as e:
# TODO: Specify which projection is invalid.
raise ValidationError(
code="invalid_projection", detail=f"One or more projections are invalid: {projections}."
)
# run the tasks
job_uid = str(job.uid)
# run needs to be created so that the UI can be updated with the task list.
user_details = get_user_details(request)
try:
# run needs to be created so that the UI can be updated with the task list.
run_uid = create_run(job_uid=job_uid, user=request.user)
except InvalidLicense as il:
raise ValidationError(code="invalid_license", detail=str(il))
except Unauthorized as ua:
raise PermissionDenied(code="permission_denied", detail=str(ua))
running = JobSerializer(job, context={"request": request})
# Run is passed to celery to start the tasks.
pick_up_run_task.apply_async(
queue="runs", routing_key="runs", kwargs={"run_uid": run_uid, "user_details": user_details},
)
return Response(running.data, status=status.HTTP_202_ACCEPTED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@action(methods=["get", "post"], detail=True)
def run(self, request, uid=None, *args, **kwargs):
"""
Creates the run (i.e. runs the job).
Gets the job_uid and current user from the request.
Creates an instance of the TaskFactory and
calls run_task on it, passing the job_uid and user.
*request:* the http request
*Returns:*
- the serialized run data.
"""
# This is just to make it easier to trace when user_details haven't been sent
user_details = get_user_details(request)
if user_details is None:
user_details = {"username": "unknown-JobViewSet.run"}
from eventkit_cloud.tasks.task_factory import InvalidLicense, Unauthorized
try:
# run needs to be created so that the UI can be updated with the task list.
run_uid = create_run(job_uid=uid, user=request.user)
except (InvalidLicense, Error) as err:
return Response([{"detail": _(str(err))}], status.HTTP_400_BAD_REQUEST)
# Run is passed to celery to start the tasks.
except Unauthorized:
raise PermissionDenied(
code="permission_denied", detail="ADMIN permission is required to run this DataPack."
)
run = ExportRun.objects.get(uid=run_uid)
if run:
logger.debug("Placing pick_up_run_task for {0} on the queue.".format(run.uid))
pick_up_run_task.apply_async(
queue="runs", routing_key="runs", kwargs={"run_uid": run_uid, "user_details": user_details},
)
logger.debug("Getting Run Data.".format(run.uid))
running = ExportRunSerializer(run, context={"request": request})
logger.debug("Returning Run Data.".format(run.uid))
return Response(running.data, status=status.HTTP_202_ACCEPTED)
else:
return Response([{"detail": _("Failed to run Export")}], status.HTTP_400_BAD_REQUEST)
@transaction.atomic
def partial_update(self, request, uid=None, *args, **kwargs):
"""
Update one or more attributes for the given job
* request: the HTTP request in JSON.
Examples:
{ "visibility" : 'SHARED', "featured" : true }
{ "featured" : false }
* Returns: a copy of the new values on success
Example:
{
"visibility": 'SHARED',
"featured" : true,
"success": true
}
** returns: 400 on error
"""
job = Job.objects.get(uid=uid)
# Does the user have admin permission to make changes to this job?
jobs = JobPermission.userjobs(request.user, JobPermissionLevel.ADMIN.value)
if not jobs.filter(id=job.id):
return Response(
[{"detail": "ADMIN permission is required to update this Datapack."}], status.HTTP_400_BAD_REQUEST,
)
response = {}
payload = request.data
for attribute, value in payload.items():
if attribute == "visibility" and value not in VisibilityState.__members__:
msg = "unknown visibility value - %s" % value
return Response([{"detail": msg}], status.HTTP_400_BAD_REQUEST)
if attribute == "permissions":
pass
elif hasattr(job, attribute):
setattr(job, attribute, value)
response[attribute] = value
else:
msg = "unidentified job attribute - %s" % attribute
return Response([{"detail": msg}], status.HTTP_400_BAD_REQUEST)
        # update permissions if present. Ensure we are not left with 0 administrators.
        # Users and / or groups may be updated. If no update info is provided, maintain
# the current set of permissions.
admins = 0
if "permissions" in payload:
serializer = JobSerializer(job, context={"request": request})
current_permissions = serializer.get_permissions(job)
if "members" not in payload["permissions"]:
payload["permissions"]["members"] = current_permissions["members"]
if "groups" not in payload["permissions"]:
payload["permissions"]["groups"] = current_permissions["groups"]
users = payload["permissions"]["members"]
groups = payload["permissions"]["groups"]
            # make sure all user names, group names, and permissions are valid, and ensure there is at least one admin
# if the job is made private
for index, set in enumerate([users, groups]):
for key in set:
if index == 0:
record = User.objects.filter(username=key)
else:
record = Group.objects.filter(name=key)
if not record.exists():
return Response(
[{"detail": "unidentified user or group : %s" % key}], status.HTTP_400_BAD_REQUEST,
)
perm = set[key]
if perm not in JobPermissionLevel.__members__:
return Response(
[{"detail": "invalid permission value : %s" % perm}], status.HTTP_400_BAD_REQUEST,
)
if perm == GroupPermissionLevel.ADMIN.value:
admins += 1
if admins == 0:
return Response(
[{"detail": "Cannot update job permissions with no administrator."}], status.HTTP_400_BAD_REQUEST,
)
# The request represents all expected permissions for the file not a partial update of the permissions.
# Therefore we delete the existing permissions, because the new permissions should be the only permissions.
with transaction.atomic():
job.permissions.all().delete()
user_objects = User.objects.filter(username__in=users)
group_objects = Group.objects.filter(name__in=groups)
user_job_permissions = [
JobPermission(job=job, content_object=user, permission=users.get(user.username))
for user in user_objects
] # NOQA
group_job_permissions = [
JobPermission(job=job, content_object=group, permission=groups.get(group.name))
for group in group_objects
] # NOQA
JobPermission.objects.bulk_create(user_job_permissions + group_job_permissions)
response["permissions"] = payload["permissions"]
job.save()
response["success"] = True
return Response(response, status=status.HTTP_200_OK)
def retrieve(self, request, uid=None, *args, **kwargs):
"""
Look up a single job by uid value.
* uid: optional job uid lookup field
* return: The selected job.
"""
return super(JobViewSet, self).retrieve(self, request, uid, *args, **kwargs)
def update(self, request, uid=None, *args, **kwargs):
"""
Update a job object, looked up by uid.
* uid: optional job uid lookup field
* return: The status of the update.
"""
return super(JobViewSet, self).update(self, request, uid, *args, **kwargs)
@action(methods=["post"], detail=False)
def filter(self, request, *args, **kwargs):
"""
Return all jobs that are readable by every
groups and every user in the payload
{ "permissions" : {
groups : [ 'group_one', 'group_two', ...]
members : ['user_one', 'user_two' ... ]
}
}
"""
if "permissions" not in request.data:
raise PermissionDenied(code="permission_denied", detail="Missing permissions attribute.")
jobs = get_jobs_via_permissions(request.data["permissions"])
serializer = ListJobSerializer(jobs, many=True, context={"request": request})
return Response(serializer.data)
@transaction.atomic
def destroy(self, request, uid=None, *args, **kwargs):
"""
Destroy a job
"""
job = Job.objects.get(uid=uid)
# Does the user have admin permission to make changes to this job?
logger.info("DELETE REQUEST")
jobs = JobPermission.userjobs(request.user, JobPermissionLevel.ADMIN.value)
if not jobs.filter(id=job.id):
raise PermissionDenied(code="permission_denied", detail="ADMIN permission is required to delete this job.")
super(JobViewSet, self).destroy(request, *args, **kwargs)
return Response(status=status.HTTP_204_NO_CONTENT)
class ExportFormatViewSet(viewsets.ReadOnlyModelViewSet):
"""
###ExportFormat API endpoint.
Endpoint exposing the supported export formats.
"""
serializer_class = ExportFormatSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = ExportFormat.objects.all()
lookup_field = "slug"
ordering = ["description"]
def list(self, request, slug=None, *args, **kwargs):
"""
* slug: optional slug value of export format
* return: A list of format types.
"""
return super(ExportFormatViewSet, self).list(self, request, slug, *args, **kwargs)
def retrieve(self, request, slug=None, *args, **kwargs):
"""
* slug: optional slug value of export format
* return: A single format object matching the provided slug value.
"""
return super(ExportFormatViewSet, self).retrieve(self, request, slug, *args, **kwargs)
class ProjectionViewSet(viewsets.ReadOnlyModelViewSet):
"""
A simple ViewSet for listing or retrieving projections.
"""
serializer_class = ProjectionSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = Projection.objects.all()
lookup_field = "srid"
ordering = ["srid"]
class AuditEventViewSet(viewsets.ReadOnlyModelViewSet):
"""
A simple ViewSet for listing or retrieving AuditEvents.
"""
serializer_class = AuditEventSerializer
permission_classes = (permissions.IsAdminUser,)
queryset = AuditEvent.objects.all()
filter_class = LogFilter
lookup_field = "id"
ordering = ["datetime"]
search_fields = ("username", "datetime", "ip", "email", "event")
class LicenseViewSet(viewsets.ReadOnlyModelViewSet):
"""
Endpoint to get detailed information about the data licenses.
"""
serializer_class = LicenseSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = License.objects.all()
lookup_field = "slug"
ordering = ["name"]
@action(methods=["get"], detail=True, renderer_classes=[PlainTextRenderer])
def download(self, request, slug=None, *args, **kwargs):
"""
Responds to a GET request with a text file of the license text
*request:* the http request
*slug:* the license slug
*Returns:*
- a .txt file of the license text.
"""
try:
license_text = License.objects.get(slug=slug).text
response = Response(license_text, content_type="text/plain")
response["Content-Disposition"] = 'attachment; filename="{}.txt"'.format(slug)
return response
except Exception:
raise NotFound(code="not_found", detail="Could not find requested license.")
def list(self, request, slug=None, *args, **kwargs):
"""
* slug: optional slug value of license
* return: A list of license objects.
"""
return super(LicenseViewSet, self).list(self, request, slug, *args, **kwargs)
def retrieve(self, request, slug=None, *args, **kwargs):
"""
* slug: optional slug value of license
* return: A single license object matching the provided slug value.
"""
return super(LicenseViewSet, self).retrieve(self, request, slug, *args, **kwargs)
class DataProviderViewSet(viewsets.ReadOnlyModelViewSet):
"""
Endpoint exposing the supported data providers.
"""
serializer_class = DataProviderSerializer
permission_classes = (permissions.IsAuthenticated,)
parser_classes = (JSONParser,)
lookup_field = "slug"
ordering = ["name"]
def get_queryset(self):
"""
This view should return a list of all the purchases
for the currently authenticated user.
"""
return DataProvider.objects.filter(Q(user=self.request.user) | Q(user=None))
@action(methods=["get", "post"], detail=True)
def status(self, request, slug=None, *args, **kwargs):
"""
Checks the status of a data provider to confirm that it is available.
* slug: The DataProvider object slug.
* return: The HTTP response of the data provider health check, in cases where there is no error. If the data
provider does not exist, returns status 400 bad request.
"""
try:
geojson = self.request.data.get("geojson", None)
providers, filtered_provider = attribute_class_filter(self.get_queryset(), self.request.user)
provider = providers.get(slug=slug)
return Response(perform_provider_check(provider, geojson), status=status.HTTP_200_OK)
except DataProvider.DoesNotExist:
raise NotFound(code="not_found", detail="Could not find the requested provider.")
except Exception as e:
logger.error(e)
raise APIException("server_error", detail="Internal server error.")
def list(self, request, slug=None, *args, **kwargs):
"""
List all data providers.
* slug: optional lookup field
* return: A list of data providers.
"""
providers, filtered_providers = attribute_class_filter(self.get_queryset(), self.request.user)
data = DataProviderSerializer(providers, many=True, context={"request": request}).data
data += FilteredDataProviderSerializer(filtered_providers, many=True).data
return Response(data)
def retrieve(self, request, slug=None, *args, **kwargs):
"""
Look up a single data provider by slug value.
* slug: optional lookup field
* return: The data provider with the given slug.
"""
providers, filtered_providers = attribute_class_filter(self.get_queryset().filter(slug=slug), self.request.user)
if providers:
return Response(DataProviderSerializer(providers.get(slug=slug), context={"request": request}).data)
elif filtered_providers:
            return Response(FilteredDataProviderSerializer(filtered_providers.get(slug=slug)).data)
class RegionViewSet(viewsets.ReadOnlyModelViewSet):
"""
Endpoint exposing the supported regions.
"""
serializer_class = RegionSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = Region.objects.all()
lookup_field = "uid"
def list(self, request, uid=None, *args, **kwargs):
"""
List all regions.
* uid: optional lookup field
* return: A list of regions.
"""
return super(RegionViewSet, self).list(self, request, uid, *args, **kwargs)
def retrieve(self, request, uid=None, *args, **kwargs):
"""
Look up a single region by slug value.
* uid: optional lookup field
* return: The region with the given slug.
"""
return super(RegionViewSet, self).retrieve(self, request, uid, *args, **kwargs)
class RegionMaskViewSet(viewsets.ReadOnlyModelViewSet):
"""
Return a MULTIPOLYGON representing the mask of the
HOT Regions as a GeoJSON Feature Collection.
"""
serializer_class = RegionMaskSerializer
permission_classes = (permissions.IsAuthenticated,)
queryset = RegionMask.objects.all()
class ExportRunViewSet(viewsets.ModelViewSet):
"""
**retrieve:**
Returns the exact run as specified by the run UID in the url `/runs/{uid}`
**list:**
Returns a list of all the runs.
Export runs can be filtered and ordered by adding optional parameters to the url:
* `user`: The user who created the job.
* `status`: The current run status (can include any number of the following: COMPLETED, SUBMITTED, INCOMPLETE, or
FAILED).
* Example = `/api/runs?status=SUBMITTED,INCOMPLETE,FAILED`
* `job_uid`: The uid of a particular job.
* `min_date`: Minimum date (YYYY-MM-DD) for the `started_at` field.
* `max_date`: Maximum date (YYYY-MM-DD) for the `started_at` field.
* `started_at`: The DateTime a run was started at in ISO date-time format.
* `published`: True or False for whether the owning job is published or not.
* `ordering`: Possible values are `started_at, status, user__username, job__name, job__event, and job__published`.
* Order can be reversed by adding `-` to the front of the order parameter.
An example request using some of the parameters.
`/api/runs?user=test_user&status=FAILED,COMPLETED&min_date=2017-05-20&max_date=2017-12-21&published=True&ordering=
-job__name`
**filter:**
Accessed at `/runs/filter`.
Accepts GET and POST. Support all the url params of 'list' with the addition of advanced features like
`search_term`, `bbox`, and `geojson`.
* `search_term`: A value to search the job name, description and event text for.
* `bbox`: Bounding box in the form of `xmin,ymin,xmax,ymax`.
To filter by geojson send the geojson geometry in the body of a POST request under the key `geojson`.
"""
serializer_class = ExportRunSerializer
permission_classes = (permissions.IsAuthenticated,)
pagination_class = LinkHeaderPagination
filter_backends = (DjangoFilterBackend, filters.OrderingFilter)
filter_class = ExportRunFilter
lookup_field = "uid"
search_fields = (
"user__username",
"status",
"job__uid",
"min_date",
"max_date",
"started_at",
"job__published",
)
ordering_fields = (
"job__name",
"started_at",
"user__username",
"job__published",
"status",
"job__event",
"job__featured",
)
ordering = ("-started_at",)
def get_queryset(self):
jobs = JobPermission.userjobs(self.request.user, "READ")
if self.request.query_params.get("slim"):
return ExportRun.objects.filter(
Q(job__in=jobs) | Q(job__visibility=VisibilityState.PUBLIC.value)
).select_related("job")
else:
return prefetch_export_runs(
(ExportRun.objects.filter(Q(job__in=jobs) | Q(job__visibility=VisibilityState.PUBLIC.value)).filter())
)
def retrieve(self, request, uid=None, *args, **kwargs):
"""
Get an ExportRun.
Gets the run_uid from the request and returns run data for the
associated ExportRun.
Args:
*request: the http request.
*uid: the run uid.
*Returns:
the serialized run data.
"""
from eventkit_cloud.tasks.task_factory import InvalidLicense
queryset = self.get_queryset().filter(uid=uid)
if not request.query_params.get("job_uid"):
queryset = queryset.filter(deleted=False)
try:
self.validate_licenses(queryset, user=request.user)
except InvalidLicense as il:
raise ValidationError(code="invalid_license", detail=str(il))
serializer = self.get_serializer(queryset, many=True, context={"request": request})
return Response(serializer.data, status=status.HTTP_200_OK)
@transaction.atomic
def destroy(self, request, *args, **kwargs):
"""
Destroy a model instance.
"""
instance = self.get_object()
job = instance.job
jobs = JobPermission.userjobs(request.user, JobPermissionLevel.ADMIN.value)
if not jobs.filter(id=job.id):
return Response(
[{"detail": "ADMIN permission is required to delete this DataPack."}], status.HTTP_400_BAD_REQUEST,
)
permissions = JobPermission.jobpermissions(job)
instance.soft_delete(user=request.user, permissions=permissions)
return Response(status=status.HTTP_204_NO_CONTENT)
def list(self, request, *args, **kwargs):
"""
List the ExportRuns
:param request: the http request
:param args:
:param kwargs:
:return: the serialized runs
"""
queryset = self.filter_queryset(self.get_queryset())
try:
self.validate_licenses(queryset, user=request.user)
except InvalidLicense as il:
raise ValidationError(code="invalid_license", detail=str(il))
        # This is to display deleted runs on the status and download pages
if not request.query_params.get("job_uid"):
queryset = queryset.filter(deleted=False)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True, context={"request": request})
return self.get_paginated_response(serializer.data)
else:
serializer = self.get_serializer(queryset, many=True, context={"request": request})
return Response(serializer.data, status=status.HTTP_200_OK)
@action(methods=["post", "get"], detail=False)
def filter(self, request, *args, **kwargs):
"""
Lists the ExportRuns and provides advanced filtering options like search_term, bbox, and geojson geometry.
Accepts GET and POST request. POST is required if you want to filter by a geojson geometry contained in the
request data
:param request: the http request
:param args:
:param kwargs:
:return: the serialized runs
"""
status_code = status.HTTP_200_OK
queryset = self.filter_queryset(self.get_queryset())
if "permissions" in request.data:
jobs = get_jobs_via_permissions(request.data["permissions"])
queryset = ExportRun.objects.filter(Q(job__in=jobs))
search_geojson = self.request.data.get("geojson", None)
if search_geojson is not None:
try:
geom = geojson_to_geos(search_geojson, 4326)
queryset = queryset.filter(job__the_geom__intersects=geom)
except ValidationError as e:
logger.debug(e.detail)
raise ValidationError(code="validation_error", detail=e.detail)
search_bbox = self.request.query_params.get("bbox", None)
if search_bbox is not None and len(search_bbox.split(",")) == 4:
extents = search_bbox.split(",")
data = {
"xmin": extents[0],
"ymin": extents[1],
"xmax": extents[2],
"ymax": extents[3],
}
try:
bbox_extents = validate_bbox_params(data)
bbox = validate_search_bbox(bbox_extents)
queryset = queryset.filter(job__the_geom__within=bbox)
except ValidationError as e:
logger.debug(e.detail)
raise ValidationError(code="validation_error", detail=e.detail)
search_term = self.request.query_params.get("search_term", None)
if search_term is not None:
queryset = queryset.filter(
(
Q(job__name__icontains=search_term)
| Q(job__description__icontains=search_term)
| Q(job__event__icontains=search_term)
)
)
if not request.query_params.get("job_uid"):
queryset = queryset.filter(deleted=False)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True, context={"request": request, "no_license": True})
response = self.get_paginated_response(serializer.data)
response.status_code = status_code
return response
else:
serializer = self.get_serializer(queryset, many=True, context={"request": request, "no_license": True})
return Response(serializer.data, status=status_code)
@transaction.atomic
def partial_update(self, request, uid=None, *args, **kwargs):
"""
Update the expiration date for an export run. If the user is a superuser,
        then any date may be specified. Otherwise the date must be before today's date + MAX_DATAPACK_EXPIRATION_DAYS
where MAX_DATAPACK_EXPIRATION_DAYS is a setting found in prod.py
* request: the HTTP request in JSON.
Example:
{
"expiration" : "2019-12-31"
}
* Returns: a copy of the new expiration value on success
Example:
{
"expiration": "2019-12-31",
"success": true
}
** returns: 400 on error
"""
payload = request.data
if "expiration" not in payload:
return Response({"success": False}, status=status.HTTP_400_BAD_REQUEST)
expiration = payload["expiration"]
target_date = parser.parse(expiration).replace(tzinfo=None)
run = ExportRun.objects.get(uid=uid)
if not request.user.is_superuser:
max_days = int(getattr(settings, "MAX_DATAPACK_EXPIRATION_DAYS", 30))
now = datetime.today()
max_date = now + timedelta(max_days)
if target_date > max_date.replace(tzinfo=None):
message = "expiration date must be before " + max_date.isoformat()
return Response({"success": False, "detail": message}, status=status.HTTP_400_BAD_REQUEST,)
if target_date < run.expiration.replace(tzinfo=None):
message = "expiration date must be after " + run.expiration.isoformat()
return Response({"success": False, "detail": message}, status=status.HTTP_400_BAD_REQUEST,)
run.expiration = target_date
run.save()
return Response({"success": True, "expiration": run.expiration}, status=status.HTTP_200_OK)
@staticmethod
def validate_licenses(queryset, user=None):
for run in queryset.all():
invalid_licenses = get_invalid_licenses(run.job, user=user)
if invalid_licenses:
raise InvalidLicense(
"The user: {0} has not agreed to the following licenses: {1}.\n"
"Please use the user account page, or the user api to agree to the "
"licenses prior to viewing run data.".format(run.job.user.username, invalid_licenses)
)
return True
def create(self, request, *args, **kwargs):
"""
Create a run.
* return: The status of the creation.
"""
return super(ExportRunViewSet, self).create(self, request, *args, **kwargs)
def update(self, request, uid=None, *args, **kwargs):
"""
Update a run.
* uid: optional lookup field
* return: The status of the update.
"""
return super(ExportRunViewSet, self).update(self, request, uid, *args, **kwargs)
class ExportTaskViewSet(viewsets.ReadOnlyModelViewSet):
"""
Provides List and Retrieve endpoints for ExportTasks.
"""
serializer_class = ExportTaskRecordSerializer
permission_classes = (permissions.IsAuthenticated,)
lookup_field = "uid"
def get_queryset(self):
return ExportTaskRecord.objects.filter(
Q(export_provider_task__run__user=self.request.user) | Q(export_provider_task__run__job__published=True)
).order_by("-started_at")
def retrieve(self, request, uid=None, *args, **kwargs):
"""
GET a single export task.
Args:
request: the http request.
uid: the uid of the export task to GET.
Returns:
the serialized ExportTaskRecord data.
"""
queryset = ExportTaskRecord.objects.filter(uid=uid)
serializer = self.get_serializer(queryset, many=True, context={"request": request})
return Response(serializer.data, status=status.HTTP_200_OK)
def list(self, request, uid=None, *args, **kwargs):
"""
List all tasks.
* uid: optional lookup field
* return: A list of all tasks.
"""
return super(ExportTaskViewSet, self).list(self, request, uid, *args, **kwargs)
class DataProviderTaskRecordViewSet(viewsets.ModelViewSet):
"""
Provides List and Retrieve endpoints for ExportTasks.
"""
serializer_class = DataProviderTaskRecordSerializer
permission_classes = (permissions.IsAuthenticated,)
lookup_field = "uid"
def get_queryset(self):
"""Return all objects user can view."""
jobs = JobPermission.userjobs(self.request.user, "READ")
return DataProviderTaskRecord.objects.filter(
Q(run__job__visibility=VisibilityState.PUBLIC.value) | Q(run__job__in=jobs)
)
def retrieve(self, request, uid=None, *args, **kwargs):
"""
GET a single export task.
Args:
request: the http request.
uid: the uid of the export provider task to GET.
Returns:
the serialized ExportTaskRecord data
"""
providers_tasks, filtered_provider_task = attribute_class_filter(
self.get_queryset().filter(uid=uid), self.request.user
)
if providers_tasks:
serializer = DataProviderTaskRecordSerializer(providers_tasks.first(), context={"request": request})
else:
serializer = FilteredDataProviderTaskRecordSerializer(filtered_provider_task.first())
return Response(serializer.data, status=status.HTTP_200_OK)
def partial_update(self, request, uid=None, *args, **kwargs):
"""
Cancels an export provider task.
* param uid: The uid of the DataProviderTaskRecord (export provider task model) to be canceled.
* return: Returns {'success': True} on success. If the user did not have the correct rights (if not superuser,
they must be asking for one of their own export provider tasks), then 403 forbidden will be returned.
"""
providers_tasks, filtered_provider_task = attribute_class_filter(
self.get_queryset().filter(uid=uid), self.request.user
)
if not providers_tasks:
return Response({"success": False}, status=status.HTTP_401_UNAUTHORIZED)
data_provider_task_record = providers_tasks.get(uid=uid)
if data_provider_task_record.run.user != request.user and not request.user.is_superuser:
return Response({"success": False}, status=status.HTTP_403_FORBIDDEN)
cancel_export_provider_task.run(
data_provider_task_uid=data_provider_task_record.uid, canceling_username=request.user.username,
)
return Response({"success": True}, status=status.HTTP_200_OK)
def list(self, request, *args, **kwargs):
"""
* return: A list of data provider task objects.
"""
providers_tasks, filtered_provider_task = attribute_class_filter(self.get_queryset(), self.request.user)
data = DataProviderTaskRecordSerializer(providers_tasks, many=True, context={"request": request}).data
data += FilteredDataProviderTaskRecordSerializer(filtered_provider_task, many=True).data
return Response(data)
def create(self, request, uid=None, *args, **kwargs):
"""
Create a data provider task object.
* uid: optional lookup field
* return: The status of the object creation.
"""
providers_tasks, filtered_provider_task = attribute_class_filter(
self.get_queryset().filter(uid=uid), self.request.user
)
if not providers_tasks:
return Response(status=status.HTTP_401_UNAUTHORIZED)
return super(DataProviderTaskRecordViewSet, self).create(self, request, uid, *args, **kwargs)
def destroy(self, request, uid=None, *args, **kwargs):
"""
Delete a data provider task object.
* uid: optional lookup field
* return: The status of the deletion.
"""
providers_tasks, filtered_provider_task = attribute_class_filter(
self.get_queryset().filter(uid=uid), self.request.user
)
if not providers_tasks:
return Response(status=status.HTTP_401_UNAUTHORIZED)
return super(DataProviderTaskRecordViewSet, self).destroy(self, request, uid, *args, **kwargs)
def update(self, request, uid=None, *args, **kwargs):
"""
Update a data provider task object.
* uid: optional lookup field
* return: The status of the update.
"""
providers_tasks, filtered_provider_task = attribute_class_filter(
self.get_queryset().filter(uid=uid), self.request.user
)
if not providers_tasks:
return Response(status=status.HTTP_401_UNAUTHORIZED)
return super(DataProviderTaskRecordViewSet, self).update(self, request, uid, *args, **kwargs)
class UserDataViewSet(viewsets.GenericViewSet):
"""
User Data
"""
serializer_class = UserDataSerializer
permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
parser_classes = (JSONParser,)
pagination_class = LinkHeaderPagination
filter_class = UserFilter
filter_backends = (DjangoFilterBackend, filters.SearchFilter)
lookup_field = "username"
lookup_value_regex = "[^/]+"
search_fields = ("username", "last_name", "first_name", "email")
def get_queryset(self):
return User.objects.all()
def partial_update(self, request, username=None, *args, **kwargs):
"""
Update user data.
User data cannot currently be updated via this API menu however UserLicense data can, by sending a patch
message,
with the licenses data that the user agrees to. Users will need to agree to all of the licenses prior to being
allowed to download data.
Request data can be posted as `application/json`.
* request: the HTTP request in JSON.
Example:
{"accepted_licenses": {
"odbl": true
}
}
"""
queryset = self.get_queryset().get(username=username)
serializer = UserDataSerializer(queryset, data=request.data, context={"request": request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def list(self, request, *args, **kwargs):
"""
Get a list of users.
* return: A list of all users.
"""
job = None
if request.query_params.get("job_uid"):
job = Job.objects.get(uid=request.query_params["job_uid"])
queryset = JobPermission.get_orderable_queryset_for_job(job, User)
total = queryset.count()
filtered_queryset = self.filter_queryset(queryset)
filtered_queryset = annotate_users_restricted(filtered_queryset, job)
if request.query_params.get("exclude_self"):
filtered_queryset = filtered_queryset.exclude(username=request.user.username)
elif request.query_params.get("prepend_self"):
if request.user in filtered_queryset:
filtered_queryset = filtered_queryset.exclude(username=request.user.username)
filtered_queryset = [qs for qs in filtered_queryset]
filtered_queryset = [request.user] + filtered_queryset
page = None
if not request.query_params.get("disable_page"):
page = self.paginate_queryset(filtered_queryset)
if page is not None:
serializer = self.get_serializer(page, many=True, context={"request": request})
response = self.get_paginated_response(serializer.data)
else:
serializer = self.get_serializer(filtered_queryset, many=True, context={"request": request})
response = Response(serializer.data, status=status.HTTP_200_OK)
response["Total-Users"] = total
return response
def retrieve(self, request, username=None):
"""
GET a user by username
"""
queryset = self.get_queryset().get(username=username)
serializer = self.get_serializer(queryset, context={"request": request})
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=False, methods=["post", "get"])
def members(self, request, *args, **kwargs):
"""
Member list from list of group ids
Example : [ 32, 35, 36 ]
"""
targets = request.data
targetnames = []
payload = []
groups = Group.objects.filter(id__in=targets)
for group in groups:
serializer = GroupSerializer(group)
for username in serializer.get_members(group):
if username not in targetnames:
targetnames.append(username)
users = User.objects.filter(username__in=targetnames).all()
for u in users:
serializer = self.get_serializer(u, context={"request": request})
payload.append(serializer.data)
return Response(payload, status=status.HTTP_200_OK)
@action(detail=True, methods=["get"])
def job_permissions(self, request, username=None):
"""
Get user's permission level for a specific job
Example: /api/users/job_permissions/admin_user?uid=job-uid-123
Response: { 'permission': USERS_PERMISSION_LEVEL }
where USERS_PERMISSION_LEVEL is either READ, ADMIN, or None
"""
user = User.objects.get(username=username)
uid = request.query_params.get("uid", None)
if not user or not uid:
return Response(status=status.HTTP_400_BAD_REQUEST)
permission = JobPermission.get_user_permissions(user, uid)
return Response({"permission": permission}, status=status.HTTP_200_OK)
class UserJobActivityViewSet(mixins.CreateModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
"""
Endpoint to create and retrieve user activity related to jobs.
"""
serializer_class = UserJobActivitySerializer
permission_classes = (permissions.IsAuthenticated,)
pagination_class = LinkHeaderPagination
filter_class = UserJobActivityFilter
def get_queryset(self):
activity_type = self.request.query_params.get("activity", "").lower()
if self.request.query_params.get("slim"):
activities = UserJobActivity.objects.select_related("job", "user")
else:
activities = UserJobActivity.objects.select_related("job", "user").prefetch_related(
"job__provider_tasks__provider",
"job__provider_tasks__formats",
"job__last_export_run__provider_tasks__tasks__result",
"job__last_export_run__provider_tasks__tasks__exceptions",
)
if activity_type == "viewed":
ids = (
UserJobActivity.objects.filter(
user=self.request.user,
type=UserJobActivity.VIEWED,
job__last_export_run__isnull=False,
job__last_export_run__deleted=False,
)
.distinct("job")
.values_list("id", flat=True)
)
return activities.filter(id__in=ids).order_by("-created_at")
else:
return activities.filter(user=self.request.user).order_by("-created_at")
def list(self, request, *args, **kwargs):
"""
Gets the most recent UserJobActivity objects.
"""
queryset = self.get_queryset()
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True, context={"request": request})
return self.get_paginated_response(serializer.data)
else:
serializer = self.get_serializer(queryset, many=True, context={"request": request})
return Response(serializer.data, status=status.HTTP_200_OK)
def create(self, request):
"""
Creates a new UserJobActivity object.
"""
activity_type = request.query_params.get("activity", "").lower()
job_uid = request.data.get("job_uid")
# Save a record of the view activity
if activity_type == "viewed":
queryset = UserJobActivity.objects.filter(
user=self.request.user,
type=UserJobActivity.VIEWED,
job__last_export_run__isnull=False,
job__last_export_run__deleted=False,
).order_by("-created_at")
if queryset.count() > 0:
last_job_viewed = queryset.first()
# Don't save consecutive views of the same job.
if str(last_job_viewed.job.uid) == job_uid:
return Response({"ignored": True}, content_type="application/json", status=status.HTTP_200_OK,)
job = Job.objects.get(uid=job_uid)
UserJobActivity.objects.create(user=self.request.user, job=job, type=UserJobActivity.VIEWED)
else:
raise ValidationError(code="invalid_activity_type", detail=f"Activity type {activity_type} is invalid.")
return Response({}, content_type="application/json", status=status.HTTP_200_OK)
class GroupViewSet(viewsets.ModelViewSet):
"""
Api components for viewing, creating, and editing groups
"""
serializer_class = GroupSerializer
permission_classes = (permissions.IsAuthenticated,)
pagination_class = LinkHeaderPagination
parser_classes = (JSONParser,)
filter_class = GroupFilter
filter_backends = (DjangoFilterBackend, filters.SearchFilter)
lookup_field = "id"
lookup_value_regex = "[^/]+"
search_fields = ("name",)
def useradmin(self, group, request):
serializer = GroupSerializer(group)
user = User.objects.all().filter(username=request.user.username)[0]
return user.username in serializer.get_administrators(group)
def get_queryset(self):
queryset = Group.objects.all()
return queryset
def update(self, request, *args, **kwargs):
"""
We don't support calls to PUT for this viewset.
* returns: 400 bad request
"""
return Response("BAD REQUEST", status=status.HTTP_400_BAD_REQUEST)
def list(self, request, *args, **kwargs):
"""
GET all groups
Sample result:
[
{
"id": 54,
"name": "Omaha 319",
"members": [
"user2",
"admin"
],
"administrators": [
"admin"
]
}
]
"""
job = None
if request.query_params.get("job_uid"):
job = Job.objects.get(uid=request.query_params["job_uid"])
queryset = JobPermission.get_orderable_queryset_for_job(job, Group)
total = queryset.count()
filtered_queryset = self.filter_queryset(queryset)
filtered_queryset = annotate_groups_restricted(filtered_queryset, job)
page = None
if not request.query_params.get("disable_page"):
page = self.paginate_queryset(filtered_queryset)
if page is not None:
serializer = self.get_serializer(page, many=True, context={"request": request})
response = self.get_paginated_response(serializer.data)
else:
serializer = self.get_serializer(filtered_queryset, many=True, context={"request": request})
response = Response(serializer.data, status=status.HTTP_200_OK)
response["Total-Groups"] = total
return response
@transaction.atomic
def create(self, request, *args, **kwargs):
"""
create a new group and place the current logged in user in the group and its administrators.
optionally, provide additional group members
Sample input:
{
"name": "Omaha 319"
}
"""
name = request.data["name"]
matches = Group.objects.filter(name__iexact=name.lower())
if len(matches) > 0:
error_data = {
"errors": [
{
"status": status.HTTP_400_BAD_REQUEST,
"title": _("Duplicate Group Name"),
"detail": _("A group named %s already exists." % name),
}
]
}
return Response(error_data, status=status.HTTP_400_BAD_REQUEST)
response = super(GroupViewSet, self).create(request, *args, **kwargs)
group_id = response.data["id"]
user = User.objects.all().filter(username=request.user.username)[0]
group = Group.objects.get(pk=group_id)
group.user_set.add(user)
groupadmin = GroupPermission.objects.create(user=user, group=group, permission=GroupPermissionLevel.ADMIN.value)
groupadmin.save()
GroupPermission.objects.create(user=user, group=group, permission=GroupPermissionLevel.MEMBER.value)
if "members" in request.data:
for member in request.data["members"]:
if member != user.username:
user = User.objects.all().filter(username=member)[0]
if user:
GroupPermission.objects.create(
user=user, group=group, permission=GroupPermissionLevel.MEMBER.value,
)
sendnotification(
request.user,
user,
NotificationVerb.ADDED_TO_GROUP.value,
group,
None,
NotificationLevel.INFO.value,
GroupPermissionLevel.MEMBER.value,
)
if "administrators" in request.data:
for admin in request.data["administrators"]:
if admin != request.user.username:
user = User.objects.all().filter(username=admin)[0]
if user:
GroupPermission.objects.create(
user=user, group=group, permission=GroupPermissionLevel.ADMIN.value,
)
sendnotification(
request.user,
user,
NotificationVerb.SET_AS_GROUP_ADMIN.value,
group,
None,
NotificationLevel.INFO.value,
GroupPermissionLevel.ADMIN.value,
)
group = Group.objects.filter(id=group_id)[0]
serializer = GroupSerializer(group)
return Response(serializer.data, status=status.HTTP_200_OK)
def retrieve(self, request, id=None):
"""
* get a group with a specific ID. Return its data, including users in the group
"""
group = Group.objects.filter(id=id)[0]
serializer = GroupSerializer(group)
return Response(serializer.data, status=status.HTTP_200_OK)
@transaction.atomic
def destroy(self, request, id=None, *args, **kwargs):
"""
Destroy a group
"""
# Not permitted if the requesting user is not an administrator
group = Group.objects.filter(id=id)[0]
if not self.useradmin(group, request):
return Response("Administative privileges required.", status=status.HTTP_403_FORBIDDEN)
super(GroupViewSet, self).destroy(request, *args, **kwargs)
return Response("OK", status=status.HTTP_200_OK)
# instance = self.get_object()
# instance.soft_delete(user=request.user)
# return Response(status=status.HTTP_204_NO_CONTENT)
@transaction.atomic
def partial_update(self, request, id=None, *args, **kwargs):
"""
Change the group's name, members, and administrators
Sample input:
{
"name": "Omaha 319"
"members": [ "user2", "user3", "admin"],
"administrators": [ "admin" ]
}
If a member wishes to remove themselves from a group they can make an patch request with no body.
However, this will not work if they are a admin of the group.
"""
group = Group.objects.filter(id=id)[0]
# we are not going anywhere if the requesting user is not an
# administrator of the current group or there is an attempt to end up with no administrators
if not self.useradmin(group, request):
user = User.objects.filter(username=request.user.username)[0]
perms = GroupPermission.objects.filter(user=user, group=group, permission=GroupPermissionLevel.MEMBER.value)
# if the user is not an admin but is a member we remove them from the group
if perms:
perms.delete()
return Response("OK", status=status.HTTP_200_OK)
return Response("Administative privileges required.", status=status.HTTP_403_FORBIDDEN)
if "administrators" in request.data:
request_admins = request.data["administrators"]
if len(request_admins) < 1:
error_data = {
"errors": [
{
"status": status.HTTP_403_FORBIDDEN,
"title": _("Not Permitted"),
"detail": _(
"You must assign another group administator before you can perform this action"
),
}
]
}
return Response(error_data, status=status.HTTP_403_FORBIDDEN)
super(GroupViewSet, self).partial_update(request, *args, **kwargs)
# if name in request we need to change the group name
if "name" in request.data:
name = request.data["name"]
if name:
group.name = name
group.save()
# examine provided lists of administrators and members. Adjust as needed.
for item in [
("members", GroupPermissionLevel.MEMBER.value),
("administrators", GroupPermissionLevel.ADMIN.value),
]:
permissionlabel = item[0]
permission = item[1]
if permissionlabel not in request.data:
continue
user_ids = [
perm.user.id for perm in GroupPermission.objects.filter(group=group).filter(permission=permission)
]
currentusers = [user.username for user in User.objects.filter(id__in=user_ids).all()]
targetusers = request.data[permissionlabel]
# Add new users for this permission level
newusers = list(set(targetusers) - set(currentusers))
users = User.objects.filter(username__in=newusers).all()
verb = NotificationVerb.ADDED_TO_GROUP.value
if permissionlabel == "administrators":
verb = NotificationVerb.SET_AS_GROUP_ADMIN.value
for user in users:
GroupPermission.objects.create(user=user, group=group, permission=permission)
sendnotification(
request.user, user, verb, group, None, NotificationLevel.INFO.value, permission,
)
# Remove existing users for this permission level
removedusers = list(set(currentusers) - set(targetusers))
users = User.objects.filter(username__in=removedusers).all()
verb = NotificationVerb.REMOVED_FROM_GROUP.value
if permissionlabel == "administrators":
verb = NotificationVerb.REMOVED_AS_GROUP_ADMIN.value
for user in users:
sendnotification(
request.user, user, verb, group, None, NotificationLevel.INFO.value, permission,
)
perms = GroupPermission.objects.filter(user=user, group=group, permission=permission).all()
for perm in perms:
perm.delete()
return Response("OK", status=status.HTTP_200_OK)
@action(detail=True, methods=["get"])
def users(self, request, id=None, *args, **kwargs):
try:
group = Group.objects.get(id=id)
except Group.DoesNotExist:
raise NotFound(code="not_found", detail="Could not find the requested group.")
serializer = GroupUserSerializer(group, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
class NotificationViewSet(viewsets.ModelViewSet):
"""
Api components for viewing and working with notifications
"""
serializer_class = NotificationSerializer
filter_backends = (DjangoFilterBackend, filters.SearchFilter)
pagination_class = LinkHeaderPagination
def get_queryset(self):
qs = Notification.objects.filter(recipient_id=self.request.user.id, deleted=False)
return qs
def list(self, request, *args, **kwargs):
"""
Get all user notifications that are not deleted
"""
notifications = self.get_queryset()
page = self.paginate_queryset(notifications)
if page is not None:
serializer = self.get_serializer(page, context={"request": self.request}, many=True)
else:
serializer = self.get_serializer(notifications, context={"request": self.request}, many=True)
return self.get_paginated_response(serializer.data)
@action(detail=False, methods=["delete"])
def delete(self, request, *args, **kwargs):
"""
Delete notifications
If request data of { ids: [....ids] } is provided only those ids will be deleted
If no request data is included all notifications will be deleted
"""
notifications = self.get_queryset()
if request.data.get("ids", None):
for id in request.data.get("ids"):
note = notifications.get(id=id)
if note:
note.deleted = True
note.save()
else:
notifications = self.get_queryset()
notifications.mark_all_as_deleted()
return Response({"success": True}, status=status.HTTP_200_OK)
@action(detail=False, methods=["post"])
def read(self, request, *args, **kwargs):
"""
Mark notifications as read
If request data of { ids: [....ids] } is provided only those ids will be marked
If no request data is included all notifications will be marked read
"""
notifications = self.get_queryset()
if request.data.get("ids", None):
for id in request.data.get("ids"):
note = notifications.get(id=id)
if note:
note.unread = False
note.save()
else:
notifications = self.get_queryset()
notifications.mark_all_as_read()
return Response({"success": True}, status=status.HTTP_200_OK)
@action(detail=False, methods=["post"])
def unread(self, request, *args, **kwargs):
"""
Mark notifications as unread
If request data of { ids: [....ids] } is provided only those ids will be marked
If no request data is included all notifications will be marked unread
"""
notifications = self.get_queryset()
if request.data.get("ids", None):
for id in request.data.get("ids"):
note = notifications.get(id=id)
if note:
note.unread = True
note.save()
else:
notifications = self.get_queryset()
notifications.mark_all_as_unread()
return Response({"success": True}, status=status.HTTP_200_OK)
@action(detail=False, methods=["get"])
def counts(self, request, *args, **kwargs):
payload = {
"read": len(request.user.notifications.read()),
"unread": len(request.user.notifications.unread()),
}
return Response(payload, status=status.HTTP_200_OK)
@action(detail=False, methods=["post"])
def mark(self, request, *args, **kwargs):
"""
Change the status of one or more notifications.
**Use if you need to modify in more than one way. Otherwise just use 'delete', 'read', or 'unread'**
Args:
A list containing one or more records like this:
[
{"id": 3, "action": "DELETE" },
{"id": 17, "action": "READ" },
{"id" : 19, "action" "UNREAD" },
...
]
Returns:
{ "success" : True} or error
"""
logger.debug(request.data)
for row in request.data:
qs = Notification.objects.filter(recipient_id=self.request.user.id, id=row["id"])
logger.debug(qs)
if row["action"] == "READ":
qs.mark_all_as_read()
if row["action"] == "DELETE":
qs.mark_all_as_deleted()
if row["action"] == "UNREAD":
qs.mark_all_as_unread()
return Response({"success": True}, status=status.HTTP_200_OK)
class DataProviderRequestViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
serializer_class = DataProviderRequestSerializer
lookup_field = "uid"
def get_queryset(self):
"""
This view should return a list of all
of Data Provider Requests for the
currently authenticated user.
"""
user = self.request.user
# Admins and staff should be able to view all requests.
if user.is_staff or user.is_superuser:
return DataProviderRequest.objects.all()
return DataProviderRequest.objects.filter(user=user)
class SizeIncreaseRequestViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
serializer_class = SizeIncreaseRequestSerializer
lookup_field = "uid"
def get_queryset(self):
"""
This view should return a list of all
of the Data Size Increase Requests for the
currently authenticated user.
"""
user = self.request.user
# Admins and staff should be able to view all requests.
if user.is_staff or user.is_superuser:
return SizeIncreaseRequest.objects.all()
return SizeIncreaseRequest.objects.filter(user=user)
class EstimatorView(views.APIView):
"""
Api components for computing size estimates for providers within a specified bounding box
"""
@action(detail=False, methods=["get"])
def get(self, request, *args, **kwargs):
"""
Args:
slugs: Comma separated list of slugs for provider slugs (e.g. 'osm,some_wms1')
bbox: Bounding box as w,s,e,n (e.g. '-130,-45,-100,10)
srs: EPSG code for the bbox srs (default=4326)
Returns:
[{ "slug" : $slug_1, "size": $estimate_1, "unit": "mb"}, ...] or error
"""
payload = []
logger.debug(request.query_params)
bbox = request.query_params.get("bbox", None).split(",") # w, s, e, n
bbox = list(map(lambda a: float(a), bbox))
srs = request.query_params.get("srs", "4326")
min_zoom = request.query_params.get("min_zoom", None)
max_zoom = request.query_params.get("max_zoom", None)
if request.query_params.get("slugs", None):
estimator = AoiEstimator(bbox=bbox, bbox_srs=srs, min_zoom=min_zoom, max_zoom=max_zoom)
for slug in request.query_params.get("slugs").split(","):
size = estimator.get_estimate_from_slug(AoiEstimator.Types.SIZE, slug)[0]
time = estimator.get_estimate_from_slug(AoiEstimator.Types.TIME, slug)[0]
payload += [
{"slug": slug, "size": {"value": size, "unit": "MB"}, "time": {"value": time, "unit": "seconds"}}
]
cache_key = get_estimate_cache_key(bbox, srs, min_zoom, max_zoom, slug)
cache.set(cache_key, (size, time), ESTIMATE_CACHE_TIMEOUT)
else:
return Response([{"detail": _("No estimates found")}], status=status.HTTP_400_BAD_REQUEST)
return Response(payload, status=status.HTTP_200_OK)
def get_models(model_list, model_object, model_index):
models = []
if not model_list:
return models
for model_id in model_list:
# TODO: would be good to accept either format slug or uuid here..
try:
model = model_object.objects.get(**{model_index: model_id})
models.append(model)
except model_object.DoesNotExist:
logger.warn(f"{str(model_object)} with {model_index}: {model_id} does not exist.")
raise NotFound(
code="not_found", detail=f"{str(model_object)} with {model_index}: {model_id} does not exist."
)
return models
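# Illustrative sketch (not part of the original module): get_models() resolves a list of
# identifiers into model instances, e.g. export format slugs into ExportFormat objects.
# The slugs below are assumptions for demonstration only.
#   formats = get_models(["gpkg", "shp"], ExportFormat, "slug")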
def get_provider_task(export_provider, export_formats):
"""
Args:
export_provider: An DataProvider model for the content provider (i.e. osm or wms service)
export_formats: An ExportFormat model for the geospatial data format (i.e. shapefile or geopackage)
    Returns:
        the created DataProviderTask with the provider's supported formats attached.
"""
provider_task = DataProviderTask.objects.create(provider=export_provider)
for export_format in export_formats:
supported_formats = export_provider.export_provider_type.supported_formats.all()
provider_task.formats.add(*supported_formats)
provider_task.save()
return provider_task
def get_user_details(request):
"""
Gets user data from a request.
:param request: View request.
:return: A dict with user data.
"""
logged_in_user = request.user
return {
"username": logged_in_user.username,
"is_superuser": logged_in_user.is_superuser,
"is_staff": logged_in_user.is_staff,
}
def geojson_to_geos(geojson_geom, srid=None):
"""
:param geojson_geom: A stringified geojson geometry
:param srid: The ESPG code of the input data
:return: A GEOSGeometry object
"""
if not geojson_geom:
raise ValidationError(code="missing_geojson", detail="No geojson geometry string supplied.")
if not srid:
srid = 4326
try:
geom = GEOSGeometry(geojson_geom, srid=srid)
except GEOSException:
raise ValidationError(
code="invalid_geometry", detail="Could not convert geojson geometry, check that your geometry is valid."
)
if not geom.valid:
raise ValidationError(
code="invalid_geometry", detail="GEOSGeometry invalid, check that your geojson geometry is valid."
)
return geom
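# Illustrative sketch (not part of the original module): converting a client-supplied
# GeoJSON string before using it in a spatial filter, as the filter endpoints above do.
#   geom = geojson_to_geos('{"type": "Point", "coordinates": [30.0, 10.0]}', srid=4326)
#   runs = ExportRun.objects.filter(job__the_geom__intersects=geom)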
def get_jobs_via_permissions(permissions):
groups = Group.objects.filter(name__in=permissions.get("groups", []))
group_query = [
Q(permissions__content_type=ContentType.objects.get_for_model(Group)),
Q(permissions__object_id__in=groups),
Q(permissions__permission=JobPermissionLevel.READ.value),
]
users = User.objects.filter(username__in=permissions.get("members", []))
user_query = [
Q(permissions__content_type=ContentType.objects.get_for_model(User)),
Q(permissions__object_id__in=users),
Q(permissions__permission=JobPermissionLevel.READ.value),
]
return Job.objects.filter(Q(*user_query) | Q(*group_query))
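# Illustrative sketch (not part of the original module): fetching every job readable by
# the named groups and users, mirroring the payload accepted by the /jobs/filter action.
# The group and user names below are assumptions for demonstration only.
#   jobs = get_jobs_via_permissions({"groups": ["Omaha 319"], "members": ["admin"]})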
def api_docs_view(request):
if request.user.is_authenticated:
return render(request, template_name="swagger-ui.html", context={"schema_url": "api:openapi-schema"})
else:
return redirect("/api/login?next=/api/docs")
| bsd-3-clause | -3,858,344,402,547,503,600 | 38.382541 | 120 | 0.577133 | false |
dichen001/Go4Jobs | JackChen/linked_list/160. Intersection of Two Linked Lists.py | 1 | 1101 | """
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
A: a1 → a2
↘
c1 → c2 → c3
↗
B: b1 → b2 → b3
begin to intersect at node c1.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
import gc
class Solution(object):
def getIntersectionNode(self, headA, headB):
if headA and headB:
A, B = headA, headB
            while A != B:  # jump to the other list's head at the end; pointers meet at the intersection or at None
A = A.next if A else headB
B = B.next if B else headA
gc.collect()
return A
| gpl-3.0 | -2,618,085,599,512,454,700 | 25.175 | 93 | 0.574057 | false |
rebelact/mailsync-app | mailsync/views/auth.py | 1 | 2858 | from mailsync.views.base import BaseView
from mailsync.forms import CreateUserForm
from mailsync.models.auth import user_model
from formencode.validators import Invalid as InvalidForm
class LoginView(BaseView):
def initialize(self):
super(LoginView, self).initialize()
def get(self):
# Redirect if there are no users in the database
users = user_model.count_users()
if users == 0:
self.redirect("/create_user")
else:
message = self.session.get("message",None)
errors = self.session.get("errors",None)
next = self.get_argument("next", None)
self.delete_session_object("errors")
self.delete_session_object("message")
self.render("auth/login.html", message=message, errors=errors, next=next)
def post(self):
self.check_xsrf_cookie()
form_data = {
"username": self.get_argument("username", ""),
"password": self.get_argument("password", ""),
}
user = user_model.check_user(form_data)
if not user:
self.session["errors"] = "Invalid login details"
self.redirect("/login")
else:
userdata = user.__dict__
self.session["user"] = {
"username": userdata["username"],
"user_id": userdata["_id"]
}
self.redirect("/")
class LogoutView(BaseView):
def initialize(self):
super(LogoutView, self).initialize()
def get(self):
self.delete_session_object("user")
self.redirect("/login")
class CreateUserView(BaseView):
def initialize(self):
super(CreateUserView, self).initialize()
def get(self):
errors = self.session.get("errors", None)
form_data = self.session.get("form_data", None)
users = user_model.count_users()
if users == 0:
self.render("auth/create_user.html", errors=errors, form_data=form_data)
else:
self.redirect("/login")
def post(self):
self.check_xsrf_cookie()
form_data = {
"username": self.get_argument("username", ""),
"password": self.get_argument("password",""),
}
try:
valid_data = CreateUserForm.to_python(form_data)
user_model.create_user(valid_data)
self.session["message"] = "Account successfuly created. You can now log in"
self.delete_session_object("errors")
self.delete_session_object("form_data")
self.redirect("/login")
except InvalidForm, e:
self.session["errors"] = e.unpack_errors()
self.session["form_data"] = form_data
self.redirect("/create_user") | mit | -3,389,300,913,787,661,000 | 28.474227 | 87 | 0.559832 | false |
kmollee/2014cdab | wsgi/programs/cdag30/.leo_shadow/xtest.py | 1 | 11341 | #@+leo-ver=4-thin
#@+node:2014pythonE.20140517034519.1935:@shadow test.py
#@@language python
import cherrypy
# This is the definition of the MAN class
'''
# Import the sub-module in the application
import programs.cdag30.man as cdag30_man
# Register man.py from the cdag30 module and map the sub-module man to its MAN() class
root.cdag30.man = cdag30_man.MAN()
# Once this is set up, you can use
/cdag30/man/assembly
# to call the assembly method of the MAN class in man.py
'''
class Test(object):
    # Each group uses index to guide the execution of the code that follows
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
這是 2014CDA 協同專案下的 cdag30 模組下的 MAN 類別.<br /><br />
<!-- 這裡採用相對連結, 而非網址的絕對連結 (這一段為 html 註解) -->
<a href="assembly">執行 MAN 類別中的 assembly 方法</a><br /><br />
請確定下列零件於 V:/home/lego/man 目錄中, 且開啟空白 Creo 組立檔案.<br />
<a href="/static/lego_man.7z">lego_man.7z</a>(滑鼠右鍵存成 .7z 檔案)<br />
'''
return outstring
@cherrypy.expose
def assembly(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
</head>
<body>
<script language="JavaScript">
/* Define a part-assembly helper function */
/*
Axis + plane assembly
axis_plane_assembly(session, assembly, transf, featID, constrain_way, part2, axis1, plane1, axis2, plane2)
====================
assembly        assembly file
transf          coordinate transform matrix
featID          parent feature to assemble onto
part2           child part to assemble
constrain_way   parameter:
1               align, align
2               align, mate
else            same as 1
plane1~plane2   reference datums of the parent
plane3~plane4   reference datums of the child
*/
function axis_plane_assembly(session, assembly, transf, featID, constrain_way, part2, axis1, plane1, axis2, plane2) {
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/lego/man/" + part2);
var componentModel = session.GetModelFromDescr(descr);
if (componentModel == null) {
document.write("在session 取得不到零件" + part2);
componentModel = session.RetrieveModel(descr);
if (componentModel == null) {
throw new Error(0, "Current componentModel is not loaded.");
}
}
if (componentModel != void null) {
var asmcomp = assembly.AssembleComponent(componentModel, transf);
}
var ids = pfcCreate("intseq");
//if the assembly already contains components
if (featID != -1) {
ids.Append(featID);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
var subassembly = subPath.Leaf;
}
// if this is the first component, use the assembly itself as the parent
else {
var subassembly = assembly;
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
}
var asmDatums = new Array(axis1, plane1);
var compDatums = new Array(axis2, plane2);
if (constrain_way == 1) {
var relation = new Array(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
} else if (constrain_way == 2) {
var relation = new Array(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
} else {
var relation = new Array(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
}
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate("pfcComponentConstraints");
for (var i = 0; i < 2; i++) {
var asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i]);
if (asmItem == void null) {
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(relationItem[i], compDatums[i]);
if (compItem == void null) {
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create(true, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
return asmcomp.Id;
}
// End of the axis_plane_assembly() function
//
function three_plane_assembly(session, assembly, transf, featID, constrain_way, part2, plane1, plane2, plane3, plane4, plane5, plane6) {
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/lego/man/" + part2);
var componentModel = session.GetModelFromDescr(descr);
if (componentModel == null) {
document.write("在session 取得不到零件" + part2);
componentModel = session.RetrieveModel(descr);
if (componentModel == null) {
throw new Error(0, "Current componentModel is not loaded.");
}
}
if (componentModel != void null) {
var asmcomp = assembly.AssembleComponent(componentModel, transf);
}
var ids = pfcCreate("intseq");
//if the assembly already contains components
if (featID != -1) {
ids.Append(featID);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
var subassembly = subPath.Leaf;
}
// if this is the first component, use the assembly itself as the parent
else {
var subassembly = assembly;
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++) {
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null) {
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null) {
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
if (constrain_way == 1) {
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
} else if (constrain_way == 2) {
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
} else {
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
}
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create(false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
return asmcomp.Id;
}
// End of the three_plane_assembly() function
//
// If the operating system running Creo is not Windows,
if (!pfcIsWindows()) {
// enable the corresponding UniversalXPConnect privilege (the equivalent of ActiveX on Windows)
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
}
// pfcGetProESession() lives in pfcUtils.js and makes sure this JavaScript runs inside the embedded browser
var session = pfcGetProESession();
// Set the config option so the assembly workflow's built-in placement assumptions are not used
session.SetConfigOption("comp_placement_assumptions", "no");
// Create the placement matrix for the component; Pro/Web.Link variables cannot be created directly and must be made with pfcCreate()
var identityMatrix = pfcCreate("pfcMatrix3D");
// Build the identity placement matrix
for (var x = 0; x < 4; x++) {
for (var y = 0; y < 4; y++) {
if (x == y) {
identityMatrix.Set(x, y, 1.0);
} else {
identityMatrix.Set(x, y, 0.0);
}
}
}
// Use identityMatrix to build the transf coordinate transform matrix
var transf = pfcCreate("pfcTransform3D").Create(identityMatrix);
// Get the current working directory
var currentDir = session.getCurrentDirectory();
// Use the blank assembly file that is currently open as the model
var model = session.CurrentModel;
// Check that a model exists and that it is an assembly; otherwise throw an error
if (model == void null || model.Type != pfcCreate("pfcModelType").MDL_ASSEMBLY)
throw new Error(0, "Current model is not an assembly.");
// Use this model as the assembly object
var assembly = model;
/*
three_plane_assembly(session, assembly, transf, featID, constrain_way, part2, plane1, plane2, plane3, plane4, plane5, plane6)
=====================
assembly        assembly file
transf          coordinate transform matrix
featID          parent feature to assemble onto
part2           child part to assemble
constrain_way   parameter:
1               align
2               mate
else            same as 1
plane1~plane3   reference planes of the parent
plane4~plane6   reference planes of the child
axis_plane_assembly(session, assembly, transf, featID, constrain_way, part2, axis1, plane1, axis2, plane2)
====================
assembly        assembly file
transf          coordinate transform matrix
featID          parent feature to assemble onto
part2           child part to assemble
constrain_way   parameter:
1               align, align
2               align, mate
else            same as 1
plane1~plane2   reference datums of the parent
plane3~plane4   reference datums of the child
*/
var body_id = three_plane_assembly(session, assembly, transf, -1, 1, "LEGO_BODY.prt", "ASM_FRONT", "ASM_TOP", "ASM_RIGHT", "FRONT", "TOP", "RIGHT");
var arm_right_id = axis_plane_assembly(session, assembly, transf, body_id, 2, "LEGO_ARM_RT.prt", "A_14", "DTM1", "A_9", "TOP");
var arm_left_id = axis_plane_assembly(session, assembly, transf, body_id, 2, "lego_arm_lt.prt", "A_15", "DTM2", "A_7", "TOP");
var hand_left_id = axis_plane_assembly(session, assembly, transf, arm_left_id, 1, "lego_hand.prt", "A_8", "DTM1", "A_1", "DTM3");
var hand_right_id = axis_plane_assembly(session, assembly, transf, arm_right_id, 1, "lego_hand.prt", "A_10", "DTM1", "A_1", "DTM3");
var head_id = axis_plane_assembly(session, assembly, transf, body_id, 1, "lego_head.prt", "A_16", "DTM3", "A_2", "DTM1");
var hat_id = axis_plane_assembly(session, assembly, transf, head_id, 1, "lego_hat.prt", "A_2", "TOP", "A_2", "FRONT");
var waist_id = three_plane_assembly(session, assembly, transf, body_id, 1, "LEGO_WAIST.prt", "DTM4", "TOP", "DTM7", "DTM1", "RIGHT", "FRONT");
var leg_left_id = axis_plane_assembly(session, assembly, transf, waist_id, 2, "lego_leg_lt.prt", "A_8", "DTM4", "A_10", "DTM1");
var leg_right_id = axis_plane_assembly(session, assembly, transf, waist_id, 2, "lego_leg_rt.prt", "A_8", "DTM5", "A_10", "DTM1");
assembly.Regenerate(void null);
session.GetModelWindow(assembly).Repaint();
</script></body></html>'''
return outstring
#@nonl
#@-node:2014pythonE.20140517034519.1935:@shadow test.py
#@-leo
| gpl-2.0 | 202,546,961,501,817,920 | 35.796429 | 157 | 0.667378 | false |
willemw12/mythcli | src/mythcli.py | 1 | 2591 | #!/usr/bin/env python3
# For more information about this program, see https://github.com/willemw12/mythcli
import argparse
import os
import sys
from mythcli.services.dvr.controllers import conflicting, expiring, recorded, upcoming
def main():
""" For all datetime format codes, see http://docs.python.org/library/datetime.html#strftime-and-strptime-behavior """
#NOTE remove leading zero in day: "%-d" or "%e". This may not be portable.
# Create top-level parser
parser = argparse.ArgumentParser(description="MythTV Services API command-line interface.")
parser.add_argument("-d", "--date-format", default="%a %d %B, %Y", help='examples: "%%Y-%%m-%%d", "%%a %%d %%B, %%Y", "%%x"')
parser.add_argument("-m", "--max-items", type=int, default=0, help="limit number of requested items. Default: no limit (0)")
parser.add_argument("-t", "--time-format", default="%H:%M", help='examples: "%%H:%%M", "%%I:%%M %%p", "%%X"')
#parser.add_argument("--debug", action="store_const", const=True, default=False, help="set log level to debug")
#parser.add_argument("--quiet", action="store_const", const=True, default=False, help="set log level to fatal")
parser.add_argument("--short-desc", action="store_const", const=True, default=False, help="short descriptions in listings")
parser.add_argument("--title-desc", action="store_const", const=True, default=False, help="titles only in listings")
#parser.add_argument("--verbose", action="store_const", const=True, default=False, help="set log level to info")
#parser.add_argument("--version", action="store_const", const=True, default=False, help="print version")
# Register subcommands
#subparsers = parser.add_subparsers(title="subcommands", description="<valid subcommands>", help="<additional help>")
subparsers = parser.add_subparsers(title="subcommands")
conflicting.add_subparser(subparsers)
expiring.add_subparser(subparsers)
recorded.add_subparser(subparsers)
upcoming.add_subparser(subparsers)
# Parse the arguments and call whatever function (subcommand) was selected
args = parser.parse_args()
#TODO? Handle missing subcommand differently. Instead of try/except, configure 'parser' to handle this, e.g. make 'help' the default subcommand
try:
args.func(args)
except AttributeError:
parser.print_usage()
#parser.exit(2, os.path.basename(__file__) + ": error: missing subcommand\n")
parser.exit(2, os.path.basename(sys.argv[0]) + ": error: missing subcommand\n")
if __name__ == "__main__":
main()
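
# Example invocation (illustrative only -- the actual subcommand names are
# registered by the add_subparser() calls in the dvr controller modules, so
# "upcoming" below is an assumption):
#
#   mythcli.py --max-items 5 --date-format "%Y-%m-%d" upcoming
#
# Global options such as --max-items, --date-format and --time-format must be
# given before the subcommand, since they belong to the top-level parser above.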
| gpl-3.0 | 1,401,626,688,239,907,800 | 54.12766 | 147 | 0.686222 | false |
huggingface/transformers | src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py | 1 | 2118 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert CANINE checkpoint."""
import argparse
from transformers import CanineConfig, CanineModel, CanineTokenizer, load_tf_weights_in_canine
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path):
# Initialize PyTorch model
config = CanineConfig()
model = CanineModel(config)
model.eval()
print(f"Building PyTorch model from configuration: {config}")
# Load weights from tf checkpoint
load_tf_weights_in_canine(model, config, tf_checkpoint_path)
# Save pytorch-model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}")
model.save_pretrained(pytorch_dump_path)
# Save tokenizer files
tokenizer = CanineTokenizer()
print(f"Save tokenizer files to {pytorch_dump_path}")
tokenizer.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint. Should end with model.ckpt",
)
parser.add_argument(
"--pytorch_dump_path",
default=None,
type=str,
required=True,
help="Path to a folder where the PyTorch model will be placed.",
)
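    # Example invocation (the paths are placeholders, shown only for
    # illustration):
    #
    #   python convert_canine_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --pytorch_dump_path /path/to/output_dir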
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.pytorch_dump_path)
| apache-2.0 | 1,678,900,075,974,450,400 | 30.61194 | 94 | 0.706327 | false |
pekrau/Publications | publications/scripts/create_curator.py | 1 | 1211 | "Create an account with the 'curator' role."
import sys
from publications import constants
from publications import utils
from publications.account import AccountSaver
def get_args():
parser = utils.get_command_line_parser(
description='Create a new curator account.')
return parser.parse_args()
def create_curator(db, email, labels):
with AccountSaver(db=db) as saver:
saver.set_email(email)
saver['owner'] = email
saver['role'] = constants.CURATOR
saver['labels'] = labels
print("Created 'curator' role account", email)
print('NOTE: No email sent!')
if __name__ == '__main__':
args = get_args()
utils.load_settings(filepath=args.settings)
db = utils.get_db()
email = input('Email address (=account identifier) > ')
if not email:
sys.exit('Error: no email address provided')
labels = []
while True:
label = input('Give label > ')
label = label.strip()
if not label: break
try:
doc = utils.get_label(db, label)
except KeyError:
print('no such label:', label)
else:
labels.append(label)
create_curator(db, email, labels)
| mit | -6,509,203,966,159,130,000 | 27.162791 | 59 | 0.618497 | false |
pombredanne/grab-1 | grab/transport/urllib3.py | 1 | 14546 | # Copyright: 2015, Grigoriy Petukhov
# Author: Grigoriy Petukhov (http://getdata.pro)
# License: MIT
from __future__ import absolute_import
import logging
from weblib.http import (normalize_url, normalize_post_data,
normalize_http_values)
from weblib.encoding import make_str, decode_pairs
from urllib3 import PoolManager, ProxyManager, exceptions, make_headers
from urllib3.filepost import encode_multipart_formdata
from urllib3.fields import RequestField
from urllib3.util.retry import Retry
from urllib3.util.timeout import Timeout
from urllib3.exceptions import ProxySchemeUnknown
import six
from six.moves.urllib.parse import urlencode, urlsplit
import random
from six.moves.http_cookiejar import CookieJar
from six.moves.urllib.parse import urlparse
from grab import error
from grab.error import GrabMisuseError
from grab.cookie import CookieManager, MockRequest, MockResponse
from grab.response import Response
from grab.upload import UploadFile, UploadContent
from grab.transport.base import BaseTransport
from user_agent import generate_user_agent
logger = logging.getLogger('grab.transport.urllib3')
def make_unicode(val, encoding='utf-8', errors='strict'):
if isinstance(val, six.binary_type):
return val.decode(encoding, errors)
elif isinstance(val, six.text_type):
return val
else:
return make_unicode(str(val))
def process_upload_items(items):
result = []
for key, val in items:
if isinstance(val, UploadContent):
headers = {'Content-Type': val.content_type}
field = RequestField(name=key, data=val.content,
filename=val.filename, headers=headers)
field.make_multipart(content_type=val.content_type)
result.append(field)
elif isinstance(val, UploadFile):
            with open(val.path, 'rb') as upload_file:
                data = upload_file.read()
headers = {'Content-Type': val.content_type}
field = RequestField(name=key, data=data,
filename=val.filename, headers=headers)
field.make_multipart(content_type=val.content_type)
result.append(field)
else:
result.append((key, val))
return result
class Request(object):
def __init__(self, method=None, url=None, data=None,
proxy=None, proxy_userpwd=None, proxy_type=None,
headers=None, body_maxsize=None):
self.url = url
self.method = method
self.data = data
self.proxy = proxy
self.proxy_userpwd = proxy_userpwd
self.proxy_type = proxy_type
self.headers = headers
self.body_maxsize = body_maxsize
self._response_file = None
self._response_path = None
def get_full_url(self):
return self.url
class Urllib3Transport(BaseTransport):
"""
Grab network transport based on urllib3 library.
"""
def __init__(self):
self.pool = PoolManager(10)
logger = logging.getLogger('urllib3.connectionpool')
logger.setLevel(logging.WARNING)
def reset(self):
#self.response_header_chunks = []
#self.response_body_chunks = []
#self.response_body_bytes_read = 0
#self.verbose_logging = False
#self.body_file = None
#self.body_path = None
# Maybe move to super-class???
self.request_head = ''
self.request_body = ''
self.request_log = ''
self._request = None
def process_config(self, grab):
req = Request(data=None)
try:
request_url = normalize_url(grab.config['url'])
except Exception as ex:
raise error.GrabInvalidUrl(
u'%s: %s' % (six.text_type(ex), grab.config['url']))
req.url = request_url
method = grab.detect_request_method()
req.method = make_str(method)
req.body_maxsize = grab.config['body_maxsize']
if grab.config['nobody']:
req.body_maxsize = 0
req.timeout = grab.config['timeout']
req.connect_timeout = grab.config['connect_timeout']
extra_headers = {}
# Body processing
if grab.config['body_inmemory']:
pass
else:
if not grab.config['body_storage_dir']:
raise GrabMisuseError(
'Option body_storage_dir is not defined')
file_, path_ = self.setup_body_file(
grab.config['body_storage_dir'],
grab.config['body_storage_filename'],
create_dir=grab.config['body_storage_create_dir'])
req._response_file = file_
req._response_path = path_
if grab.config['multipart_post'] is not None:
post_data = grab.config['multipart_post']
if isinstance(post_data, six.binary_type):
pass
elif isinstance(post_data, six.text_type):
raise GrabMisuseError('Option multipart_post data'
' does not accept unicode.')
else:
post_items = normalize_http_values(
grab.config['multipart_post'],
charset=grab.config['charset'],
ignore_classes=(UploadFile, UploadContent),
)
#if six.PY3:
post_items = decode_pairs(post_items,
grab.config['charset'])
post_items = process_upload_items(post_items)
post_data, content_type = encode_multipart_formdata(post_items)
extra_headers['Content-Type'] = content_type
extra_headers['Content-Length'] = len(post_data)
req.data = post_data
elif grab.config['post'] is not None:
post_data = normalize_post_data(grab.config['post'],
grab.config['charset'])
# py3 hack
# if six.PY3:
# post_data = smart_unicode(post_data,
# grab.config['charset'])
extra_headers['Content-Length'] = len(post_data)
req.data = post_data
if method in ('POST', 'PUT'):
if (grab.config['post'] is None and
grab.config['multipart_post'] is None):
raise GrabMisuseError('Neither `post` or `multipart_post`'
' options was specified for the %s'
' request' % method)
# Proxy
if grab.config['proxy']:
req.proxy = grab.config['proxy']
if grab.config['proxy_userpwd']:
req.proxy_userpwd = grab.config['proxy_userpwd']
if grab.config['proxy_type']:
req.proxy_type = grab.config['proxy_type']
else:
req.proxy_type = 'http'
# User-Agent
if grab.config['user_agent'] is None:
if grab.config['user_agent_file'] is not None:
with open(grab.config['user_agent_file']) as inf:
lines = inf.read().splitlines()
grab.config['user_agent'] = random.choice(lines)
else:
grab.config['user_agent'] = generate_user_agent()
extra_headers['User-Agent'] = grab.config['user_agent']
# Headers
headers = extra_headers
headers.update(grab.config['common_headers'])
if grab.config['headers']:
headers.update(grab.config['headers'])
req.headers = headers
# Cookies
self.process_cookie_options(grab, req)
self._request = req
def request(self):
req = self._request
if req.proxy:
if req.proxy_userpwd:
headers = make_headers(proxy_basic_auth=req.proxy_userpwd)
else:
headers = None
proxy_url = '%s://%s' % (req.proxy_type, req.proxy)
try:
pool = ProxyManager(proxy_url, proxy_headers=headers)
except ProxySchemeUnknown:
raise GrabMisuseError('Urllib3 transport does '
'not support %s proxies' % req.proxy_type)
else:
pool = self.pool
try:
retry = Retry(redirect=False, connect=False, read=False)
timeout = Timeout(connect=req.connect_timeout,
read=req.timeout)
#req_headers = dict((make_unicode(x), make_unicode(y))
# for (x, y) in req.headers.items())
if six.PY3:
req_url = make_unicode(req.url)
req_method = make_unicode(req.method)
else:
req_url = make_str(req.url)
req_method = req.method
res = pool.urlopen(req_method,
req_url,
body=req.data, timeout=timeout,
retries=retry, headers=req.headers,
preload_content=False)
except exceptions.ConnectTimeoutError as ex:
raise error.GrabConnectionError('Could not create connection')
except exceptions.ProtocolError as ex:
raise error.GrabConnectionError(ex.args[1][0], ex.args[1][1])
# WTF?
self.request_head = ''
self.request_body = ''
self.request_log = ''
self._response = res
#raise error.GrabNetworkError(ex.args[0], ex.args[1])
#raise error.GrabTimeoutError(ex.args[0], ex.args[1])
#raise error.GrabConnectionError(ex.args[0], ex.args[1])
#raise error.GrabAuthError(ex.args[0], ex.args[1])
#raise error.GrabTooManyRedirectsError(ex.args[0],
# ex.args[1])
#raise error.GrabCouldNotResolveHostError(ex.args[0],
# ex.args[1])
#raise error.GrabNetworkError(ex.args[0], ex.args[1])
#six.reraise(error.GrabInternalError, error.GrabInternalError(ex),
# sys.exc_info()[2])
def prepare_response(self, grab):
#if self.body_file:
# self.body_file.close()
response = Response()
head = ''
for key, val in self._response.getheaders().items():
head += '%s: %s\r\n' % (key, val)
head += '\r\n'
response.head = make_str(head, encoding='latin', errors='ignore')
#if self.body_path:
# response.body_path = self.body_path
#else:
# response.body = b''.join(self.response_body_chunks)
if self._request._response_path:
response.body_path = self._request._response_path
            # Quick dirty hack: actually, the response is fully read into memory here
self._request._response_file.write(self._response.read())#data)
self._request._response_file.close()
else:
response.body = self._response.read()#data
        if self._request.body_maxsize is not None:
            #if self.response_body_bytes_read > self.config_body_maxsize:
            #    logger.debug('Response body max size limit reached: %s' %
            #                 self.config_body_maxsize)
            # The response stream has already been consumed above, so truncate
            # the body that was read instead of calling read() again (which
            # would always return an empty string at this point).
            if response.body:
                response.body = response.body[:self._request.body_maxsize]
# Clear memory
#self.response_header_chunks = []
response.code = self._response.status
#response.total_time = self.curl.getinfo(pycurl.TOTAL_TIME)
#response.connect_time = self.curl.getinfo(pycurl.CONNECT_TIME)
#response.name_lookup_time = self.curl.getinfo(pycurl.NAMELOOKUP_TIME)
#response.download_size = self.curl.getinfo(pycurl.SIZE_DOWNLOAD)
#response.upload_size = self.curl.getinfo(pycurl.SIZE_UPLOAD)
#response.download_speed = self.curl.getinfo(pycurl.SPEED_DOWNLOAD)
#response.remote_ip = self.curl.getinfo(pycurl.PRIMARY_IP)
response.url = self._response.get_redirect_location() or self._request.url
import email.message
hdr = email.message.Message()
for key, val in self._response.getheaders().items():
hdr[key] = val
response.parse(charset=grab.config['document_charset'],
headers=hdr)
jar = self.extract_cookiejar(self._response, self._request)
response.cookies = CookieManager(jar)
# We do not need anymore cookies stored in the
# curl instance so drop them
#self.curl.setopt(pycurl.COOKIELIST, 'ALL')
return response
def extract_cookiejar(self, resp, req):
jar = CookieJar()
jar.extract_cookies(MockResponse(resp._original_response.msg),
MockRequest(req))
return jar
def process_cookie_options(self, grab, req):
# `cookiefile` option should be processed before `cookies` option
# because `load_cookies` updates `cookies` option
if grab.config['cookiefile']:
# Do not raise exception if cookie file does not exist
try:
grab.cookies.load_from_file(grab.config['cookiefile'])
except IOError as ex:
logging.error(ex)
request_host = urlsplit(req.url).hostname
if request_host:
if request_host.startswith('www.'):
request_host_no_www = request_host[4:]
else:
request_host_no_www = request_host
# Process `cookies` option that is simple dict i.e.
# it provides only `name` and `value` attributes of cookie
# No domain, no path, no expires, etc
# I pass these no-domain cookies to *each* requested domain
# by setting these cookies with corresponding domain attribute
# Trying to guess better domain name by removing leading "www."
if grab.config['cookies']:
if not isinstance(grab.config['cookies'], dict):
raise error.GrabMisuseError('cookies option should be a dict')
for name, value in grab.config['cookies'].items():
grab.cookies.set(
name=name,
value=value,
domain=request_host_no_www
)
cookie_hdr = grab.cookies.get_cookie_header(req)
if cookie_hdr:
req.headers['Cookie'] = cookie_hdr
| mit | -5,214,602,950,276,204,000 | 38.313514 | 82 | 0.566341 | false |
QTB-HHU/ModelHeatShock | HSM_Reactions.py | 1 | 3798 | from HSM_Temperature import *
############ REACTIONS ############
def nuP(Ph, HP, kP0): # P#-->P
nuP = kP0 * Ph * HP # //(2000+HP)
return nuP
def nuPp(P, t, kP0p, n1, T0const, TparamSet, DirectControlnuPp): # P-->P#
# the DirectControl optional argument serves to switch between the normal nuPp and the nuPp that we change directly.
if DirectControlnuPp[0] == "No":
#n1 and T0const are not used anymore
R = 8.3144598 # Ideal Gas Constant (J mol^-1 K^-1)
Ea = 174440. # (J mol^-1) Activation energy (J mol^-1)
B = Ea/R # B = 20980.330
A = kP0p * 9.431831774375398 * pow(10.,28) # 9.27*pow(10,30) (kP0p was 98.28419570824974)
KelvinToCelsius = 273.15
TinKelvin = T(t, TparamSet) + KelvinToCelsius
        nuPp = P * A*math.exp(-B/TinKelvin)  # P * (Arrhenius equation for the temperature-dependent k)
#nuPp = kP0p * P * T(t, TparamSet) ** n1 / (T0const ** n1 + T(t, TparamSet) ** n1)
#print( "!!! " + str((t-vorl)/60.) + " " + str(T(t, TparamSet)) + " " + str(nuPp))
#print()
#print(DirectControlnuPp[0]+" "+str(nuPp))
elif DirectControlnuPp[0] == "Yes":
nuPp = DirectControlnuPp[1] * P
#print( "XXXXXXXXXXXXXXXXXXXXXXX " + str((t-vorl)/60.) + " " + str(T(t, TparamSet)) + " " + str(nuPp))
#print()
#print(DirectControlnuPp[0]+" "+str(nuPp))
else:
print("Error in nuPp in HSM_Reactions.py")
return nuPp
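
# Note added for clarity: the "No" branch above is a plain Arrhenius rate law,
#     k(T) = A * exp(-Ea / (R * T)),
# with Ea = 174440 J mol^-1, R = 8.3144598 J mol^-1 K^-1 and B = Ea / R,
# evaluated at the temperature T(t) in Kelvin; nuPp is then k(T) * P (first
# order in P).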
def nuS(Ss, kS): # S*-->S
nuS = kS * Ss
return nuS
def nuSp(S, Ph, kSp0, n2, P0const): # S-->S*
nuSp = kSp0 * S * pow(Ph, n2) / (pow(P0const, n2) + pow(Ph, n2))
return nuSp
def nuFp(F, Ss, kFp0): # F-->F*
nuFp = kFp0 * F * Ss / (1. + Ss)
return nuFp
def nuF(I, Fs, kF0):  # F*-->F  // probably should be a Michaelis-Menten term?
nuF = kF0 * I * Fs
return nuF
def piF(RF, kFpi0): # mRF: F + mRF
piF = kFpi0 * RF
return piF
def nuFGp(FG, kFGp): # FG -> F + G
nuFGp = kFGp * FG
return nuFGp
def nuFG(G, F, kFG):  # F + G -> FG  // PROBLEM: how should a reaction with 2 reactants be handled?
nuFG = kFG * G * F
return nuFG
def etaF(F, ketaF): # F-->
etaF = ketaF * F
return etaF
def nuFsG(G, Fs, kFsG):  # F* + G -> F*G  // PROBLEM: see above
nuFsG = kFsG * G * Fs
return nuFsG
def nuFsGp(FsG, kFsGp): # F*G->F* + G
nuFsGp = kFsGp * FsG
return nuFsGp
def nuFsp(FsG, I, kFsp): # F*G->FG
nuFsp = kFsp * FsG * I
return nuFsp
def nuFs(FG, kFs): # FG->F*G
nuFs = kFs * FG
return nuFs
def piRF(FsG, kpiRF): # F*G: mRF
piRF = kpiRF * FsG
return piRF
def piRHP(FsG, kpiRH): # F*G: mRHP
piRHP = kpiRH * FsG
return piRHP
def piRFAddConst(piRFconst):
return piRFconst
def piRHPAddConst(piRHPconst):
return piRHPconst
def piHP(RHP, kpiHP): # mRHP: HP + mRHP
piHP = kpiHP * RHP
return piHP
def etaHP(HP, ketaHP): # HP-->
etaHP = ketaHP * HP
return etaHP
def etaRF(RF, ketaRF): # mRF-->
etaRF = ketaRF * RF
return etaRF
def etaRHP(RHP, ketaRHP): # mRHP-->
etaRHP = ketaRHP * RHP
return etaRHP
# The following 4 reactions, not present in the original model by Alexander, can be added to the system
# to reproduce the results of the experiments performed in Schroda et al. 2000
def piRHP_ARS(FsG, kpiRH_ARS): # F*G: mRHP_ARS
piRHP_ARS = kpiRH_ARS * FsG
return piRHP_ARS
def piHP_ARS(RHP_ARS, kpiHP_ARS): # mRHP_ARS: HP_ARS + mRHP_ARS
piHP_ARS = kpiHP_ARS * RHP_ARS
return piHP_ARS
def etaHP_ARS(HP_ARS, ketaHP_ARS): # HP_ARS-->
etaHP_ARS = ketaHP_ARS * HP_ARS
return etaHP_ARS
def etaRHP_ARS(RHP_ARS, ketaRHP_ARS): # mRHP_ARS-->
etaRHP_ARS = ketaRHP_ARS * RHP_ARS
return etaRHP_ARS
| gpl-3.0 | 1,146,378,558,672,577,200 | 22.7375 | 120 | 0.575566 | false |
RichardTMR/homework | week1/class1_linear_regression.py | 1 | 3290 | import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
def plot_line(x, y, y_hat,line_color='blue'):
# Plot outputs
plt.scatter(x, y, color='black')
plt.plot(x, y_hat, color=line_color,
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
def linear_grad_func(theta, x, y):
# compute gradient
m = y.size
it = np.ones(shape=(m, 2))
it[:, 1] = x[:, 0]
prediction = it.dot(theta.transpose())
grad = ((prediction - y).transpose().dot(it)) / m * 1.0
return grad
def linear_val_func(theta, x):
# forwarding
return np.dot(np.c_[np.ones(x.shape[0]), x], theta.T)
def linear_cost_func(theta, x, y):
# compute cost (loss)
m = len(y)
it = np.ones(shape=(m, 2))
it[:, 1] = x[:, 0]
predictions = it.dot(theta.transpose())
sqerrors = (predictions - y) ** 2
cost = (1.0 / (2 * m)) * sqerrors.sum()
return cost
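
# Reference note (added): with the design matrix X = [1, x] built above, these
# helpers implement the usual least-squares cost and gradient,
#     J(theta) = 1/(2m) * sum((X.dot(theta.T) - y) ** 2)
#     grad     = 1/m   * (X.dot(theta.T) - y).T.dot(X)
# which linear_grad_desc() below iterates as theta <- theta - lr * grad.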
def linear_grad_desc(theta, X_train, Y_train, lr=0.1, max_iter=10000, converge_change=.001):
cost_iter = []
cost = linear_cost_func(theta, X_train, Y_train)
cost_iter.append([0, cost])
cost_change = 1
i = 1
while cost_change > converge_change and i < max_iter:
pre_cost = cost
# compute gradient
grad = linear_grad_func(theta, X_train, Y_train)
# Update gradient
theta = theta - lr * grad
cost = linear_cost_func(theta, X_train, Y_train)
cost_iter.append([i, cost])
cost_change = abs(cost - pre_cost)
i += 1
return theta, cost_iter
def linear_regression():
# load dataset
dataset = datasets.load_diabetes()
# Select only 2 dims
X = dataset.data[:, 2]
Y = dataset.target
# split dataset into training and testing
X_train = X[:-20, None]
X_test = X[-20:, None]
Y_train = Y[:-20, None]
Y_test = Y[-20:, None]
# Linear regression
theta = np.random.rand(1, X_train.shape[1]+1)
fitted_theta, cost_iter = linear_grad_desc(theta, X_train, Y_train, lr=0.1, max_iter=50000)
print('Coefficients: {}'.format(fitted_theta[0,-1]))
print('Intercept: {}'.format(fitted_theta[0,-2]))
print('MSE: {}'.format(np.sum((linear_val_func(fitted_theta, X_test) - Y_test)**2) / Y_test.shape[0]))
plot_line(X_test, Y_test, linear_val_func(fitted_theta, X_test))
def sklearn_linear_regression():
# load dataset
dataset = datasets.load_diabetes()
# Select only 2 dims
X = dataset.data[:, 2]
Y = dataset.target
# split dataset into training and testing
X_train = X[:-20, None]
X_test = X[-20:, None]
Y_train = Y[:-20, None]
Y_test = Y[-20:, None]
# Linear regression
regressor = linear_model.LinearRegression()
regressor.fit(X_train, Y_train)
print('Coefficients: {}'.format(regressor.coef_))
print('Intercept: {}'.format(regressor.intercept_))
print('MSE:{}'.format(np.mean((regressor.predict(X_test) - Y_test) ** 2)))
plot_line(X_test, Y_test, regressor.predict(X_test),line_color='red')
def main():
print('Class 1 Linear Regression Example')
linear_regression()
print ('')
print('sklearn Linear Regression Example')
sklearn_linear_regression()
if __name__ == "__main__":
main()
| apache-2.0 | -7,782,481,078,116,972,000 | 24.905512 | 106 | 0.59848 | false |
examachine/pisi | pisi/actionsapi/coreutils.py | 1 | 1995 | #-*- coding: utf-8 -*-
#
# Copyright (C) 2005, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# Author: S. Caglar Onur
# Standard Python Modules
import re
import sys
from itertools import izip
from itertools import imap
from itertools import count
from itertools import ifilter
from itertools import ifilterfalse
# ActionsAPI
import pisi.actionsapi
def cat(filename):
return file(filename).xreadlines()
class grep:
'''keep only lines that match the regexp'''
def __init__(self, pat, flags = 0):
self.fun = re.compile(pat, flags).match
def __ror__(self, input):
return ifilter(self.fun, input)
class tr:
'''apply arbitrary transform to each sequence element'''
def __init__(self, transform):
self.tr = transform
def __ror__(self, input):
return imap(self.tr, input)
class printto:
'''print sequence elements one per line'''
def __init__(self, out = sys.stdout):
self.out = out
def __ror__(self,input):
for line in input:
print >> self.out, line
printlines = printto(sys.stdout)
class terminator:
def __init__(self,method):
self.process = method
def __ror__(self,input):
return self.process(input)
aslist = terminator(list)
asdict = terminator(dict)
astuple = terminator(tuple)
join = terminator(''.join)
enum = terminator(enumerate)
class sort:
def __ror__(self,input):
ll = list(input)
ll.sort()
return ll
sort = sort()
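
# Usage sketch (added for illustration, not part of the original API): because
# the classes above overload __ror__, any iterable can be piped shell-style:
#
#   ['ham', 'spam', 'ham'] | grep('^ham') | tr(str.upper) | aslist
#   # -> ['HAM', 'HAM']
#   ['b', 'a', 'b'] | sort | uniq | aslist
#   # -> ['a', 'b']
#
# cat(filename) starts such a pipeline from the lines of a file.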
class uniq:
def __ror__(self,input):
for i in input:
try:
if i == prev:
continue
except NameError:
pass
prev = i
yield i
uniq = uniq()
| gpl-3.0 | 8,104,818,021,379,982,000 | 23.329268 | 79 | 0.622055 | false |
Technikradio/C3FOCSite | c3shop/test/tools.py | 1 | 1768 | from frontpage.models import Article, Profile, Media, ArticleMedia, MediaUpload, Post, Settings
from django.contrib.auth.models import User
# This function assumes that the create superuser command has already been run.
def make_testing_db():
m = Media()
m.headline = "Most ugly image"
m.lowResFile = "https://example.com/image.jpg"
m.highResFile = "https://example.com/image.jpg"
m.save()
print("media created")
u = Profile()
u.authuser = User.objects.all()[0]
u.active = True
u.dect = 5234
u.displayName = "Test Profile 01"
u.rights = 0
u.avatarMedia = m
u.notes = "<center>This is to test html insertion</center>"
u.save()
print("Profile created")
a = Article()
a.cachedText = "<h2>This is a dummy article due to testing purposes</h2>"
a.description = "Test article"
a.price = "$15.00"
a.quantity = 1000
a.size = "XXL"
a.type = 1
a.visible = True
a.addedByUser = u
a.save()
print("Article created")
am = ArticleMedia()
am.AID = a
am.MID = m
am.save()
print("Article media link created")
mu = MediaUpload()
mu.MID = m
mu.UID = u
mu.save()
print("Media user link created")
p = Post()
p.title = "Test post 01"
p.cacheText = "<p>this is a test post<br/>generated by tools.make_testing_db()</p>"
p.createdByUser = u
    p.visibleLevel = 0
    p.save()
    print("Post created")
    s = Settings()
s.changedByUser = u
s.property = '''[{
"type":"link",
"href":"example.com","text":"Visit example.com"
},{"type":"link","text":"Visit the top level website",
"href":".."}]'''
s.SName = 'frontpage.ui.navbar.content'
s.requiredLevel = 0
s.save()
print("NavBar setting created")
| bsd-3-clause | -6,947,669,652,041,555,000 | 25.787879 | 95 | 0.60181 | false |
userzimmermann/robotframework-python3 | src/robot/output/filelogger.py | 1 | 2129 | # Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import PY3
from robot.errors import DataError
from .loggerhelper import AbstractLogger
class FileLogger(AbstractLogger):
def __init__(self, path, level):
AbstractLogger.__init__(self, level)
self._writer = self._get_writer(path) # unit test hook
def _get_writer(self, path):
try:
return open(path, 'w')
except EnvironmentError as err:
raise DataError(err.strerror)
def message(self, msg):
if self._is_logged(msg.level) and not self._writer.closed:
entry = '%s | %s | %s\n' % (msg.timestamp, msg.level.ljust(5),
msg.message)
if PY3 and hasattr(self._writer, 'encoding'):
self._writer.write(entry)
else:
self._writer.write(entry.encode('UTF-8'))
def start_suite(self, suite):
self.info("Started test suite '%s'" % suite.name)
def end_suite(self, suite):
self.info("Ended test suite '%s'" % suite.name)
def start_test(self, test):
self.info("Started test case '%s'" % test.name)
def end_test(self, test):
self.info("Ended test case '%s'" % test.name)
def start_keyword(self, kw):
self.debug(lambda: "Started keyword '%s'" % kw.name)
def end_keyword(self, kw):
self.debug(lambda: "Ended keyword '%s'" % kw.name)
def output_file(self, name, path):
self.info('%s: %s' % (name, path))
def close(self):
self._writer.close()
| apache-2.0 | 8,191,555,460,039,876,000 | 31.753846 | 75 | 0.622828 | false |
google/dotty | efilter/protocols/iset.py | 1 | 2326 | # -*- coding: utf-8 -*-
# EFILTER Forensic Query Language
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EFILTER abstract type system."""
from efilter import dispatch
from efilter import protocol
from efilter.protocols import eq
# Declarations:
# pylint: disable=unused-argument
@dispatch.multimethod
def union(x, y):
raise NotImplementedError()
@dispatch.multimethod
def intersection(x, y):
raise NotImplementedError()
@dispatch.multimethod
def difference(x, y):
raise NotImplementedError()
@dispatch.multimethod
def issuperset(x, y):
raise NotImplementedError()
@dispatch.multimethod
def issubset(x, y):
return issuperset(y, x)
@dispatch.multimethod
def isstrictsuperset(x, y):
return issuperset(x, y) and eq.ne(x, y)
@dispatch.multimethod
def isstrictsubset(x, y):
return isstrictsuperset(y, x)
@dispatch.multimethod
def contains(s, e):
raise NotImplementedError()
class ISet(protocol.Protocol):
_required_functions = (union, intersection, difference, issuperset,
contains)
# Default implementations:
ISet.implement(
for_types=(set, frozenset),
implementations={
union: lambda x, y: x | frozenset(y),
intersection: lambda x, y: x & frozenset(y),
difference: lambda x, y: x - frozenset(y),
issuperset: lambda x, y: x >= frozenset(y),
contains: lambda s, e: e in s
}
)
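
# Usage sketch (added for illustration): together with the list/tuple
# registrations just below, the module-level functions dispatch to these
# per-type implementations, e.g.
#
#   union({1, 2}, {2, 3})          # -> {1, 2, 3}
#   issuperset([1, 2, 3], (1, 2))  # -> True
#   contains((1, 2, 3), 2)         # -> True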
ISet.implement(
for_types=(list, tuple),
implementations={
union: lambda x, y: frozenset(x) | frozenset(y),
intersection: lambda x, y: frozenset(x) & frozenset(y),
difference: lambda x, y: frozenset(x) - frozenset(y),
issuperset: lambda x, y: frozenset(x) >= frozenset(y),
contains: lambda s, e: e in s
}
)
| apache-2.0 | 4,413,265,449,356,274,000 | 22.734694 | 74 | 0.689166 | false |
kusinkay/plugin.offtictor | torhelper.py | 1 | 4925 | from resources.lib import tor
class TorFeeds:
def __init__(self):
self.feeds = []
self.time = 0
def add_feeds(self, feeds):
'''
Add an array of feeds
'''
self.feeds = feeds
def add_feed(self, feed):
torFeed = TorFeed(feed)
self.feeds.append(torFeed)
def get_feeds(self):
result = []
for tFeed in self.feeds:
result.append(tFeed.feed)
return result
def serialize(self):
feeds = []
for feed in self.feeds:
feeds.append( feed.serialize())
output = {
"time": self.time,
"feeds" : feeds
}
return repr(output)
def unserialize(self, object):
self.time = object["time"]
for oFeed in object["feeds"]:
feed = TorFeed(None)
feed.unserialize(oFeed)
self.add_feed(feed.feed)
def __getstate__(self):
d = dict(self.__dict__)
del d['feeds']
return d
def __setstate__(self, d):
self.__dict__.update(d)
class TorFeed:
def __init__(self, feed):
self.feed = tor.Subscriptions(None)
self.feed = feed
def serialize(self):
output = {
"feed":{
"id": self.feed.id,
"matches": self.feed.matches,
"title": self.feed.title,
"iconUrl": self.feed.iconUrl,
"firstitemmsec": self.feed.firstitemmsec,
"unread_count": self.feed.unread_count
}
}
return output
def unserialize(self, object):
self.feed = tor.Subscriptions(None)
self.feed.id = object["feed"]["id"]
self.feed.matches = object["feed"]["matches"]
self.feed.title = object["feed"]["title"]
self.feed.iconUrl = object["feed"]["iconUrl"]
self.feed.firstitemmsec = object["feed"]["firstitemmsec"]
self.feed.unread_count = object["feed"]["unread_count"]
def __getstate__(self):
d = dict(self.__dict__)
#del d['_logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
class TorList:
'''
Helps to save a feed to cache
'''
def __init__(self):
self.posts = []
self.time = 0
def count(self):
return len(self.posts)
def add_post(self, post):
self.posts.append(post)
        self.posts = sorted(self.posts, key=lambda post: post.item.published, reverse=True)
def get_post_list(self):
'''
result = []
for post in self.posts:
result.append(post.item.title)
return result
'''
return self.posts
def serialize(self):
posts = []
for post in self.posts:
posts.append( post.serialize())
output = {
"time": self.time,
"posts" : posts
}
return repr(output)
def unserialize(self, object):
self.time = object["time"]
for oPost in object["posts"]:
post = TorPost(None)
post.unserialize(oPost)
self.add_post(post)
def __getstate__(self):
d = dict(self.__dict__)
del d['posts']
return d
def __setstate__(self, d):
self.__dict__.update(d)
class TorPost:
def __init__(self, item):
self.item = item
def __getstate__(self):
d = dict(self.__dict__)
#del d['_logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def serialize(self):
output = {
"item":{
"item_id": self.item.item_id,
"title" : self.item.title,
"content" : self.item.content,
"href" : self.item.href,
"mediaUrl" : self.item.mediaUrl,
"source" : self.item.source,
"source_id" : self.item.source_id,
"published": self.item.published,
#"video": self.item.video
}
}
return output
def unserialize(self, object):
self.item = tor.Item(None, object["item"]["item_id"])
self.item.title = object["item"]["title"]
self.item.content = object["item"]["content"]
self.item.href = object["item"]["href"]
self.item.mediaUrl = object["item"]["mediaUrl"]
self.item.source = object["item"]["source"]
self.item.source_id = object["item"]["source_id"]
self.item.published = object["item"]["published"]
#self.item.video = object["item"]["video"]
| gpl-2.0 | -4,851,109,222,728,168,000 | 26.519553 | 97 | 0.478376 | false |
SkyTruth/skytruth-automation-hub | gae/pubsub.py | 1 | 5643 | """
Publish/Subscribe tool
@author Paul Woods <[email protected]>
"""
import webapp2
from google.appengine.ext import db
from google.appengine.api import taskqueue
import json
import urllib2
import os
from taskqueue import TaskQueue
class Subscription (db.Model):
event = db.StringProperty()
url = db.StringProperty()
created = db.DateTimeProperty(auto_now_add=True)
data = db.StringProperty() #this contains arbitrary content encoded as json
class PubSub ():
@staticmethod
def publish (event, pub_data = None):
"""Publish an event. This will trigger all subscriptions with a matching event
Args:
event name of the event
pub_data optional dict of params to be passed to the subscriber
Returns:
the number of subscriptions that were triggered
"""
#Get all subscriptions with a matching event
count = 0
q = db.GqlQuery("SELECT __key__ FROM Subscription WHERE event = :1", event)
for key in q.run():
# add a push-queue entry for each notification
taskqueue.add(url='/pubsub/notify', params={'key':str(key), 'event': event, 'pub_data': json.dumps(pub_data)})
count = count + 1
#return the number of subscriptions triggered
return count
@staticmethod
def subscribe (event, url, sub_data = None):
"""Subscribe to an event.
Args:
event name of the event to subscribe to
url url to receive a POST when the specified event is published
sub_data optional dict of params to be passed to the subscriber. This can be used to contain
a 'secret' key that will identify the post as coming from this source
Returns:
a subscription id
"""
sub = Subscription(event=event, url=url, data=json.dumps(sub_data))
return str(sub.put())
@staticmethod
def unsubscribe (key):
""" Remove an existing subscription.
Args:
key A subscrption key previously returned by a call to subscribe
Returns:
True if the subscription was removed, False if it was not found
"""
sub = Subscription.get(db.Key(encoded=key))
if sub:
sub.delete()
return sub is not None
@staticmethod
def notify (key, event, pub_data):
"""Send notification to the specified subscription.
"""
sub = Subscription.get(db.Key(encoded=key))
if not sub:
return None
data = {
'key' : key,
'event': event,
'pub_data': pub_data,
'sub_data': json.loads(sub.data)
}
if sub.url.startswith ('/'):
#Handle local urls through the task queue
taskqueue.add(
url=sub.url,
headers = {'Content-Type':'application/json'},
payload=json.dumps(data))
else:
#for external urls use urllib2
req = urllib2.Request(sub.url)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(data))
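
# Usage sketch (added for illustration; the event name, URL and payload below
# are made up):
#
#   key = PubSub.subscribe('scene.new', '/pubsub/task',
#                          sub_data={'channel': 'scenes', 'pubname': 'scene_id'})
#   PubSub.publish('scene.new', pub_data={'scene_id': 'S1234'})  # queues a POST per subscriber
#   PubSub.unsubscribe(key)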
#handler for notify - called once for each subscription that is triggered by a published event
class NotifyHandler(webapp2.RequestHandler):
"""Handler for pubsub notifications.
This gets called from the taskqueue by tasks added by PubSub.publish()
"""
def post(self):
self.response.headers.add_header('Content-Type', 'application/json')
r = {'status': 'OK'}
try:
PubSub.notify(
key = self.request.get('key'),
event = self.request.get('event'),
pub_data = json.loads(self.request.get('pub_data')))
except Exception, e:
r['status'] = 'ERR'
r['message'] = str(e)
self.response.write(json.dumps( r ))
#creates a new task in the task queue
class TaskHandler(webapp2.RequestHandler):
"""This handler acts as a subscribe target and creates a new task in the task queue
        sub_data.channel specifies the task queue channel
        sub_data.taskname specifies the name to be assigned to the task
        OR
        sub_data.pubname specifies the field name in pub_data to use for the task name
"""
def post(self):
self.response.headers.add_header('Content-Type', 'application/json')
r = {'status': 'OK'}
try:
data = json.loads(self.request.body)
channel = data['sub_data']['channel']
name = data['sub_data'].get('taskname')
if not name:
name = data['pub_data'].get(data['sub_data'].get('pubname'))
r['id'] = TaskQueue.add (channel, name, data)
except Exception, e:
r['status'] = 'ERR'
r['message'] = str(e)
self.response.write(json.dumps( r ))
class TestHandler(webapp2.RequestHandler):
"""This handler expects a json POST, and it returns same json it receives. Used for testing."""
def post(self):
self.response.headers.add_header('Content-Type', self.request.headers['Content-Type'])
self.response.write (self.request.body)
app = webapp2.WSGIApplication([
('/pubsub/notify', NotifyHandler),
('/pubsub/test', TestHandler),
('/pubsub/task', TaskHandler)
],debug=True)
| mit | -7,474,916,540,044,112,000 | 35.173077 | 122 | 0.577352 | false |
eroicaleo/LearningPython | ch20/GeneratorFunctionsandExpressions.py | 1 | 2789 | #!/usr/bin/python3.3
def gensquares(N):
for i in range(N):
yield i ** 2
for i in gensquares(5):
print(i, end=":")
print()
x = gensquares(4)
print(x)
print(next(x))
print(next(x))
print(next(x))
print(next(x))
# throw exception: StopIteration
# print(next(x))
def ups(line):
for x in line.split(','):
yield x.upper()
print(tuple(ups('aaa,bbb,ccc')))
print({i: s for (i, s) in enumerate(ups('aaa,bbb,ccc'))})
G = (x**2 for x in range(4))
print(next(G))
print(next(G))
print(next(G))
print(next(G))
# throw exception: StopIteration
# print(next(G))
# Generator expression
for x in (x**2 for x in range(4)):
print('%s, %s' % (x, x / 2.0))
s = ''.join(x.upper() for x in 'aaa,bbb,ccc'.split(','))
print(s)
a, b, c = (x + '\n' for x in 'aaa,bbb,ccc'.split(','))
print(a, c)
print(sum(x**2 for x in range(4)))
print(sorted(x**2 for x in range(4)))
print(sorted((x**2 for x in range(4)), reverse=True))
print(list(map(abs, [-1, -2, 3, 4])))
print(list(abs(x) for x in [-1, -2, 3, 4]))
print(list(map(lambda x: x * 2, (1, 2, 3, 4))))
print(list(x * 2 for x in (1, 2, 3, 4)))
# Compare between generator expression, list comprehension and map
line = "aaa,bbb,ccc"
print(''.join([x.upper() for x in line.split(',')]))
print(''.join(x.upper() for x in line.split(',')))
print(''.join(map(str.upper, line.split(','))))
print(''.join(x*2 for x in line.split(',')))
print(''.join(map(lambda x: x * 2, line.split(','))))
# filter v.s. generator expression
line = "aa bbb c"
print(''.join(x for x in line.split(' ') if len(x) > 1))
print(''.join(filter(lambda x: len(x) > 1, line.split(' '))))
print(''.join(x.upper() for x in line.split(' ') if len(x) > 1))
print(''.join(map(lambda x: x.upper(), filter(lambda x: len(x) > 1, line.split(' ')))))
res = ''
for x in line.split():
if len(x) > 1:
res += x.upper()
print(res)
# Generator functions Versus Generator expressions
G = (c * 4 for c in 'SPAM')
print(list(G))
def fourtimes(s):
for c in s:
yield c * 4
G = fourtimes('spam')
print(list(G))
G = (c * 4 for c in 'SPAM')
I = iter(G)
print(next(I))
print(next(I))
G = fourtimes('spam')
I = iter(G)
print(next(I))
print(next(I))
print(iter(G) is G)
G = (c * 4 for c in 'SPAM')
I = iter(G)
print(next(I))
print(next(I))
I2 = iter(G)
print(next(I2))
def both(N):
yield from range(N)
yield from (x ** 2 for x in range(N))
print(list(both(5)))
print(" : ".join(str(i) for i in both(5)))
# Generators in Build-in types, tools and classes
import os
for (root, subs, files) in os.walk('.'):
for name in files:
print(name)
def f(a, b, c): print("%s, %s, %s" % (a, b, c))
f(1, 2, 3)
f(*range(3))
f(*(i for i in range(2)), c=5)
D = {'a': 'Bob', 'b': 'dev', 'c': 40.5}
print(D)
f(**D)
f(*D)
f(*D.values())
| mit | 1,515,841,372,151,251,500 | 19.507353 | 87 | 0.580495 | false |
evancich/apm_motor | modules/waf/waflib/extras/gccdeps.py | 1 | 5955 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008-2010 (ita)
"""
Execute the tasks with gcc -MD, read the dependencies from the .d file
and prepare the dependency calculation for the next run.
Usage:
def configure(conf):
conf.load('gccdeps')
"""
import os, re, threading
from waflib import Task, Logs, Utils, Errors
from waflib.Tools import c_preproc
from waflib.TaskGen import before_method, feature
lock = threading.Lock()
gccdeps_flags = ['-MD']
if not c_preproc.go_absolute:
gccdeps_flags = ['-MMD']
# Third-party tools are allowed to add extra names in here with append()
supported_compilers = ['gcc', 'icc', 'clang']
def scan(self):
if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS:
if not self.env.GCCDEPS:
self.generator.bld.fatal('Load gccdeps in configure!')
return self.no_gccdeps_scan()
nodes = self.generator.bld.node_deps.get(self.uid(), [])
names = []
return (nodes, names)
re_o = re.compile("\.o$")
re_splitter = re.compile(r'(?<!\\)\s+') # split by space, except when spaces are escaped
def remove_makefile_rule_lhs(line):
# Splitting on a plain colon would accidentally match inside a
# Windows absolute-path filename, so we must search for a colon
# followed by whitespace to find the divider between LHS and RHS
# of the Makefile rule.
rulesep = ': '
sep_idx = line.find(rulesep)
if sep_idx >= 0:
return line[sep_idx + 2:]
else:
return line
def path_to_node(base_node, path, cached_nodes):
# Take the base node and the path and return a node
# Results are cached because searching the node tree is expensive
# The following code is executed by threads, it is not safe, so a lock is needed...
if getattr(path, '__hash__'):
node_lookup_key = (base_node, path)
else:
# Not hashable, assume it is a list and join into a string
node_lookup_key = (base_node, os.path.sep.join(path))
try:
lock.acquire()
node = cached_nodes[node_lookup_key]
except KeyError:
node = base_node.find_resource(path)
cached_nodes[node_lookup_key] = node
finally:
lock.release()
return node
def post_run(self):
# The following code is executed by threads, it is not safe, so a lock is needed...
if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS:
return self.no_gccdeps_post_run()
if getattr(self, 'cached', None):
return Task.Task.post_run(self)
name = self.outputs[0].abspath()
name = re_o.sub('.d', name)
txt = Utils.readf(name)
#os.remove(name)
# Compilers have the choice to either output the file's dependencies
# as one large Makefile rule:
#
# /path/to/file.o: /path/to/dep1.h \
# /path/to/dep2.h \
# /path/to/dep3.h \
# ...
#
# or as many individual rules:
#
# /path/to/file.o: /path/to/dep1.h
# /path/to/file.o: /path/to/dep2.h
# /path/to/file.o: /path/to/dep3.h
# ...
#
# So the first step is to sanitize the input by stripping out the left-
# hand side of all these lines. After that, whatever remains are the
# implicit dependencies of task.outputs[0]
txt = '\n'.join([remove_makefile_rule_lhs(line) for line in txt.splitlines()])
# Now join all the lines together
txt = txt.replace('\\\n', '')
val = txt.strip()
val = [x.replace('\\ ', ' ') for x in re_splitter.split(val) if x]
nodes = []
bld = self.generator.bld
# Dynamically bind to the cache
try:
cached_nodes = bld.cached_nodes
except AttributeError:
cached_nodes = bld.cached_nodes = {}
for x in val:
node = None
if os.path.isabs(x):
node = path_to_node(bld.root, x, cached_nodes)
else:
path = bld.bldnode
# when calling find_resource, make sure the path does not contain '..'
x = [k for k in Utils.split_path(x) if k and k != '.']
while '..' in x:
idx = x.index('..')
if idx == 0:
x = x[1:]
path = path.parent
else:
del x[idx]
del x[idx-1]
node = path_to_node(path, x, cached_nodes)
if not node:
raise ValueError('could not find %r for %r' % (x, self))
if id(node) == id(self.inputs[0]):
# ignore the source file, it is already in the dependencies
# this way, successful config tests may be retrieved from the cache
continue
nodes.append(node)
Logs.debug('deps: gccdeps for %s returned %s' % (str(self), str(nodes)))
bld.node_deps[self.uid()] = nodes
bld.raw_deps[self.uid()] = []
try:
del self.cache_sig
except:
pass
Task.Task.post_run(self)
def sig_implicit_deps(self):
if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS:
return self.no_gccdeps_sig_implicit_deps()
try:
return Task.Task.sig_implicit_deps(self)
except Errors.WafError:
return Utils.SIG_NIL
for name in 'c cxx'.split():
try:
cls = Task.classes[name]
except KeyError:
pass
else:
cls.no_gccdeps_scan = cls.scan
cls.no_gccdeps_post_run = cls.post_run
cls.no_gccdeps_sig_implicit_deps = cls.sig_implicit_deps
cls.scan = scan
cls.post_run = post_run
cls.sig_implicit_deps = sig_implicit_deps
@before_method('process_source')
@feature('force_gccdeps')
def force_gccdeps(self):
self.env.ENABLE_GCCDEPS = ['c', 'cxx']
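# Typical usage (illustrative sketch, not from the original file): load the tool
# from a project wscript so that configure() below runs and the dependency
# flags are appended, e.g.
#
#   def configure(conf):
#       conf.load('compiler_c compiler_cxx')
#       conf.load('gccdeps')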
def configure(conf):
# record that the configuration was executed properly
conf.env.GCCDEPS = True
global gccdeps_flags
flags = conf.env.GCCDEPS_FLAGS or gccdeps_flags
if conf.env.CC_NAME in supported_compilers:
try:
conf.check(fragment='int main() { return 0; }', features='c force_gccdeps', cflags=flags, msg='Checking for c flags %r' % ''.join(flags))
except Errors.ConfigurationError:
pass
else:
conf.env.append_value('CFLAGS', gccdeps_flags)
conf.env.append_unique('ENABLE_GCCDEPS', 'c')
if conf.env.CXX_NAME in supported_compilers:
try:
conf.check(fragment='int main() { return 0; }', features='cxx force_gccdeps', cxxflags=flags, msg='Checking for cxx flags %r' % ''.join(flags))
except Errors.ConfigurationError:
pass
else:
conf.env.append_value('CXXFLAGS', gccdeps_flags)
conf.env.append_unique('ENABLE_GCCDEPS', 'cxx')
| gpl-3.0 | -6,775,665,167,069,047,000 | 27.222749 | 146 | 0.673048 | false |
bmazin/SDR | DataReadout/ReadoutControls/lib/LabJackPython-8-26-2011/src/u12.py | 2 | 114714 | """
Name: u12.py
Desc: Defines the U12 class, which makes working with a U12 much easier. The
functions of the U12 class are divided into two categories: UW and
low-level.
Most of the UW functions are exposed as functions of the U12 class. With
the exception of the "e" functions, UW functions are Windows only. The "e"
functions will work with both the UW and the Exodriver. Therefore, people
wishing to write cross-platform code should restrict themselves to using
only the "e" functions. The UW functions are described in Section 4 of the
U12 User's Guide:
http://labjack.com/support/u12/users-guide/4
All low-level functions of the U12 class begin with the word
raw. For example, the low-level function Counter can be called with
U12.rawCounter(). Currently, low-level functions are limited to the
Exodriver (Linux and Mac OS X). You can find descriptions of the low-level
functions in Section 5 of the U12 User's Guide:
http://labjack.com/support/u12/users-guide/5
"""
import platform
import ctypes
import os, atexit
import math
from time import time
import struct
WINDOWS = "Windows"
ON_WINDOWS = (os.name == 'nt')
class U12Exception(Exception):
"""Custom Exception meant for dealing specifically with U12 Exceptions.
Error codes are either going to be a LabJackUD error code or a -1. The -1 implies
a python wrapper specific error.
def __init__(self, ec = 0, errorString = ''):
self.errorCode = ec
self.errorString = errorString
if not self.errorString:
#try:
self.errorString = getErrorString(ec)
#except:
# self.errorString = str(self.errorCode)
def __str__(self):
return self.errorString
"""
pass
class BitField(object):
"""
Provides a method for working with bit fields.
>>> bf = BitField()
>>> print bf
[ bit7 = 0, bit6 = 0, bit5 = 0, bit4 = 0, bit3 = 0, bit2 = 0, bit1 = 0, bit0 = 0 ]
You can use attribute accessing for easy bit flipping:
>>> bf.bit4 = 1
>>> bf.bit7 = 1
>>> print bf
[ bit7 = 1, bit6 = 0, bit5 = 0, bit4 = 1, bit3 = 0, bit2 = 0, bit1 = 0, bit0 = 0 ]
You can also use list-style accessing. Counting starts on the left:
>>> print bf[0] # List index 0 is bit7
1
>>> print bf[3] # List index 3 is bit4
1
List-style slicing:
>>> print bf[3:]
[1, 0, 0, 0, 0]
List-style setting bits works as you would expect:
>>> bf[1] = 1
>>> print bf
[ bit7 = 1, bit6 = 1, bit5 = 0, bit4 = 1, bit3 = 0, bit2 = 0, bit1 = 0, bit0 = 0 ]
It provides methods for going to and from bytes:
>>> bf = BitField(123)
>>> print bf
[ bit7 = 0, bit6 = 1, bit5 = 1, bit4 = 1, bit3 = 1, bit2 = 0, bit1 = 1, bit0 = 1 ]
>>> bf = BitField()
>>> bf.fromByte(123) # Modifies bf in place
>>> print bf
[ bit7 = 0, bit6 = 1, bit5 = 1, bit4 = 1, bit3 = 1, bit2 = 0, bit1 = 1, bit0 = 1 ]
>>> bf.bit4 = 0
>>> print bf.asByte()
107
You can iterate of the raw bits ( 1 and 0 Vs. '1' and '0') easily:
>>> for i in bf:
... print i
0
1
1
0
1
0
1
1
You can also iterate over the labels and their data values using items():
>>> for label, data in bf.items():
... print label, data
bit7 0
bit6 1
bit5 1
bit4 0
bit3 1
bit2 0
bit1 1
bit0 1
As an added bonus, it can also be cast as an int or hex:
>>> int(bf)
107
>>> hex(bf)
'0x6b'
See the description of the __init__ method for setting the label parameters. """
def __init__(self, rawByte = None, labelPrefix = "bit", labelList = None, zeroLabel = "0", oneLabel = "1"):
"""
Name: BitField.__init__(rawByte = None, labelPrefix = "bit",
labelList = None, zeroLabel = "0",
oneLabel = "1")
Args: rawByte, a value to set the bit field values to.
labelPrefix, what should go before the labels in labelList
labelList, a list of labels to apply to each bit. If None, it
gets set to range(7,-1,-1).
zeroLabel, bits with a value of 0 will have this label
oneLabel, bits with a value of 1 will have this label
Desc: Creates a new bitfield and sets up the labels.
With out any arguments, you get a bit field that looks like this:
>>> bf = BitField()
>>> print bf
[ bit7 = 0, bit6 = 0, bit5 = 0, bit4 = 0, bit3 = 0, bit2 = 0, bit1 = 0,
bit0 = 0 ]
To make the labels, it iterates over all the labelList and adds the
labelPrefix to them. If you have less than 8 labels, then your bit field
will only work up to that many bits.
To make a BitField with labels for FIO0-7 you can do the following:
>>> bf = BitField(labelPrefix = "FIO")
>>> print bf
[ FIO7 = 0, FIO6 = 0, FIO5 = 0, FIO4 = 0, FIO3 = 0, FIO2 = 0, FIO1 = 0,
FIO0 = 0 ]
The labels don't have to be numbers, for example:
>>> names = [ "Goodreau", "Jerri", "Selena", "Allan", "Tania",
"Kathrine", "Jessie", "Zelma" ]
>>> bf = BitField( labelPrefix = "", labelList = names)
>>> print bf
[ Goodreau = 0, Jerri = 0, Selena = 0, Allan = 0, Tania = 0,
Kathrine = 0, Jessie = 0, Zelma = 0 ]
You can change the display value of zero and one to be whatever you
want. For example, if you have a BitField that represents FIO0-7
directions:
>>> dirs = BitField(rawByte = 5, labelPrefix = "FIO",
zeroLabel = "Output", oneLabel = "Input")
>>> print dirs
[ FIO7 = Output, FIO6 = Output, FIO5 = Output, FIO4 = Output,
FIO3 = Output, FIO2 = Input, FIO1 = Output, FIO0 = Input ]
Note, that when you access the value, you will get 1 or 0, not "Input"
or "Output. For example:
>>> print dirs.FIO3
0
"""
# Do labels first, so that self.something = something works.
self.__dict__['labels'] = []
self.labelPrefix = labelPrefix
if labelList is None:
self.labelList = range(8)
else:
self.labelList = list(reversed(labelList))
self.zeroLabel = zeroLabel
self.oneLabel = oneLabel
self.rawValue = 0
self.rawBits = [ 0 ] * 8
self.data = [ self.zeroLabel ] * 8
items = min(8, len(self.labelList))
for i in reversed(range(items)):
self.labels.append("%s%s" % (self.labelPrefix, self.labelList[i]))
if rawByte is not None:
self.fromByte(rawByte)
def fromByte(self, raw):
"""
Name: BitField.fromByte(raw)
Args: raw, the raw byte to make the BitField.
Desc: Takes a byte, and modifies self to match.
>>> bf = BitField()
>>> bf.fromByte(123) # Modifies bf in place
>>> print bf
[ bit7 = 0, bit6 = 1, bit5 = 1, bit4 = 1, bit3 = 1, bit2 = 0, bit1 = 1,
bit0 = 1 ]
"""
self.rawValue = raw
self.rawBits = []
self.data = []
items = min(8, len(self.labelList))
for i in reversed(range(items)):
self.rawBits.append( ((raw >> (i)) & 1) )
self.data.append(self.oneLabel if bool(((raw >> (i)) & 1)) else self.zeroLabel)
def asByte(self):
"""
Name: BitField.asByte()
Args: None
Desc: Returns the value of the bitfield as a byte.
>>> bf = BitField()
>>> bf.fromByte(123) # Modifies bf in place
>>> bf.bit4 = 0
>>> print bf.asByte()
107
"""
byteVal = 0
for i, v in enumerate(reversed(self.rawBits)):
byteVal += ( 1 << i ) * v
return byteVal
def asBin(self):
result = "0b"
for i in self.rawBits:
result += "%s" % i
return result
def __len__(self):
return len(self.data)
def __repr__(self):
result = "["
for i in range(len(self.data)):
result += " %s = %s (%s)," % (self.labels[i], self.data[i], self.rawBits[i])
result = result.rstrip(',')
result += " ]"
return "<BitField object: %s >" % result
def __str__(self):
result = "["
for i in range(len(self.data)):
result += " %s = %s," % (self.labels[i], self.data[i])
result = result.rstrip(',')
result += " ]"
return result
def __getattr__(self, label):
try:
i = self.labels.index(label)
return self.rawBits[i]
except ValueError:
raise AttributeError(label)
def __setattr__(self, label, value):
try:
i = self.labels.index(label)
self.rawBits[i] = int(bool(value))
self.data[i] = self.oneLabel if bool(value) else self.zeroLabel
except ValueError:
self.__dict__[label] = value
def __getitem__(self, key):
return self.rawBits[key]
def __setitem__(self, key, value):
self.rawBits[key] = int(bool(value))
self.data[key] = self.oneLabel if bool(value) else self.zeroLabel
def __iter__(self):
return iter(self.rawBits)
def items(self):
"""
Name: BitField.items()
Args: None
Desc: Returns a list of tuples where the first item is the label and the
second is the string value, like "High" or "Input"
>>> dirs = BitField(rawByte = 5, labelPrefix = "FIO",
zeroLabel = "Output", oneLabel = "Input")
>>> print dirs
[ FIO7 = Output, FIO6 = Output, FIO5 = Output, FIO4 = Output,
FIO3 = Output, FIO2 = Input, FIO1 = Output, FIO0 = Input ]
>>> for label, data in dirs.items():
... print label, data
...
FIO7 Output
FIO6 Output
FIO5 Output
FIO4 Output
FIO3 Output
FIO2 Input
FIO1 Output
FIO0 Input
"""
return zip(self.labels, self.data)
def __int__(self):
return self.asByte()
def __hex__(self):
return hex(self.asByte())
def __add__(self, other):
"""
A helper to prevent having to test if a variable is a bitfield or int.
"""
return other + self.asByte()
def errcheck(ret, func, args):
if ret == -1:
try:
ec = ctypes.get_errno()
raise U12Exception("Exodriver returned error number %s" % ec)
except AttributeError:
raise U12Exception("Exodriver returned an error, but LabJackPython is unable to read the error code. Upgrade to Python 2.6 for this functionality.")
else:
return ret
def _loadLinuxSo():
try:
l = ctypes.CDLL("liblabjackusb.so", use_errno=True)
except TypeError:
l = ctypes.CDLL("liblabjackusb.so")
l.LJUSB_Stream.errcheck = errcheck
l.LJUSB_Read.errcheck = errcheck
return l
def _loadMacDylib():
try:
l = ctypes.CDLL("liblabjackusb.dylib", use_errno=True)
except TypeError:
l = ctypes.CDLL("liblabjackusb.dylib")
l.LJUSB_Stream.errcheck = errcheck
l.LJUSB_Read.errcheck = errcheck
return l
staticLib = None
if os.name == 'posix':
try:
staticLib = _loadLinuxSo()
except OSError, e:
pass # We may be on Mac.
except Exception, e:
raise U12Exception("Could not load the Linux SO for some reason other than it not being installed. Ethernet connectivity only.\n\n The error was: %s" % e)
try:
if staticLib is None:
staticLib = _loadMacDylib()
except OSError, e:
raise U12Exception("Could not load the Exodriver driver. Ethernet connectivity only.\n\nCheck that the Exodriver is installed, and the permissions are set correctly.\nThe error message was: %s" % e)
except Exception, e:
raise U12Exception("Could not load the Mac Dylib for some reason other than it not being installed. Ethernet connectivity only.\n\n The error was: %s" % e)
else:
try:
staticLib = ctypes.windll.LoadLibrary("ljackuw")
except:
raise Exception, "Could not load LabJack UW driver."
class U12(object):
"""
U12 Class for all U12 specific commands.
u12 = U12()
"""
def __init__(self, id = -1, serialNumber = None, debug = False):
self.id = id
self.serialNumber = serialNumber
self.deviceName = "U12"
self.streaming = False
self.handle = None
self.debug = debug
self._autoCloseSetup = False
if not ON_WINDOWS:
# Save some variables to save state.
self.pwmAVoltage = 0
self.pwmBVoltage = 0
self.open(id, serialNumber)
def open(self, id = -1, serialNumber = None):
"""
Opens the U12.
The Windows UW driver opens the device every time a function is called.
The Exodriver, however, works like the UD family of devices and returns
a handle. On Windows, this method does nothing. On Mac OS X and Linux,
this method acquires a device handle and saves it to the U12 object.
"""
if ON_WINDOWS:
pass
else:
if self.debug: print "open called"
devType = ctypes.c_ulong(1)
openDev = staticLib.LJUSB_OpenDevice
openDev.restype = ctypes.c_void_p
if serialNumber is not None:
numDevices = staticLib.LJUSB_GetDevCount(devType)
for i in range(numDevices):
handle = openDev(i+1, 0, devType)
if handle != 0 and handle is not None:
self.handle = ctypes.c_void_p(handle)
try:
serial = self.rawReadSerial()
except Exception:
serial = self.rawReadSerial()
if serial == int(serialNumber):
break
else:
self.close()
if self.handle is None:
raise U12Exception("Couldn't find a U12 with a serial number matching %s" % serialNumber)
elif id != -1:
numDevices = staticLib.LJUSB_GetDevCount(devType)
for i in range(numDevices):
handle = openDev(i+1, 0, devType)
if handle != 0 and handle is not None:
self.handle = ctypes.c_void_p(handle)
try:
unitId = self.rawReadLocalId()
except Exception:
unitId = self.rawReadLocalId()
if unitId == int(id):
break
else:
self.close()
if self.handle is None:
raise U12Exception("Couldn't find a U12 with a local ID matching %s" % id)
elif id == -1:
handle = openDev(1, 0, devType)
if handle == 0 or handle is None:
raise Exception("Couldn't open a U12. Check that one is connected and try again.")
else:
self.handle = ctypes.c_void_p(handle)
# U12 ignores first command, so let's write a command.
command = [ 0 ] * 8
command[5] = 0x57 # 0b01010111
try:
self.write(command)
self.read()
except:
pass
self.id = self.rawReadLocalId()
else:
raise Exception("Invalid combination of parameters.")
if not self._autoCloseSetup:
# Only need to register auto-close once per device.
atexit.register(self.close)
self._autoCloseSetup = True
def close(self):
if ON_WINDOWS:
pass
else:
staticLib.LJUSB_CloseDevice(self.handle)
self.handle = None
def write(self, writeBuffer):
if ON_WINDOWS:
pass
else:
if self.handle is None:
raise U12Exception("The U12's handle is None. Please open a U12 with open()")
if self.debug: print "Writing:", hexWithoutQuotes(writeBuffer)
newA = (ctypes.c_byte*len(writeBuffer))(0)
for i in range(len(writeBuffer)):
newA[i] = ctypes.c_byte(writeBuffer[i])
writeBytes = staticLib.LJUSB_Write(self.handle, ctypes.byref(newA), len(writeBuffer))
if(writeBytes != len(writeBuffer)):
raise U12Exception( "Could only write %s of %s bytes." % (writeBytes, len(writeBuffer) ) )
return writeBuffer
def read(self, numBytes = 8):
if ON_WINDOWS:
pass
else:
if self.handle is None:
raise U12Exception("The U12's handle is None. Please open a U12 with open()")
newA = (ctypes.c_byte*numBytes)()
readBytes = staticLib.LJUSB_Read(self.handle, ctypes.byref(newA), numBytes)
# return a list of integers in command/response mode
result = [(newA[i] & 0xff) for i in range(readBytes)]
if self.debug: print "Received:", hexWithoutQuotes(result)
return result
# Low-level helpers
def rawReadSerial(self):
"""
Name: U12.rawReadSerial()
Args: None
Desc: Reads the serial number from internal memory.
Returns: The U12's serial number as an integer.
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawReadSerial()
10004XXXX
"""
results = self.rawReadRAM()
return struct.unpack(">I", struct.pack("BBBB", results['DataByte3'], results['DataByte2'], results['DataByte1'], results['DataByte0']))[0]
def rawReadLocalId(self):
"""
Name: U12.rawReadLocalId()
Args: None
Desc: Reads the Local ID from internal memory.
Returns: The U12's Local ID as an integer.
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawReadLocalId()
0
"""
results = self.rawReadRAM(0x08)
return results['DataByte0']
# Begin Section 5 Functions
def rawAISample(self, channel0PGAMUX = 8, channel1PGAMUX = 9, channel2PGAMUX = 10, channel3PGAMUX = 11, UpdateIO = False, LEDState = True, IO3toIO0States = 0, EchoValue = 0):
"""
Name: U12.rawAISample(channel0PGAMUX = 8, channel1PGAMUX = 9,
channel2PGAMUX = 10, channel3PGAMUX = 11,
UpdateIO = False, LEDState = True,
IO3toIO0States = 0, EchoValue = 0)
Args: channel0PGAMUX, A byte that contains channel0 information
channel1PGAMUX, A byte that contains channel1 information
channel2PGAMUX, A byte that contains channel2 information
channel3PGAMUX, A byte that contains channel3 information
IO3toIO0States, A byte that represents the states of IO0 to IO3
UpdateIO, If true, set IO0 to IO 3 to match IO3toIO0States
LEDState, Turns the status LED on or off.
EchoValue, Sometimes, you want what you put in.
Desc: Collects readings from 4 analog inputs. It can also toggle the
status LED and update the state of the IOs. See Section 5.1 of
the User's Guide.
By default it will read AI0-3 (single-ended).
Returns: A dictionary with the following keys:
PGAOvervoltage, A bool representing if the U12 detected overvoltage
IO3toIO0States, a BitField representing the state of IO0 to IO3
Channel0-3, the analog voltage for the channel
EchoValue, a repeat of the value passed in.
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawAISample()
{
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0),
IO1 = Low (0), IO0 = Low (0) ] >,
'Channel0': 1.46484375,
'Channel1': 1.4501953125,
'Channel2': 1.4599609375,
'Channel3': 1.4306640625,
'PGAOvervoltage': False,
'EchoValue': 0
}
"""
command = [ 0 ] * 8
# Bits 6-4: PGA for 1st Channel
# Bits 3-0: MUX command for 1st Channel
command[0] = int(channel0PGAMUX)
tempNum = command[0] & 7 # 7 = 0b111
channel0Number = tempNum if (command[0] & 0xf) > 7 else tempNum+8
channel0Gain = (command[0] >> 4) & 7 # 7 = 0b111
command[1] = int(channel1PGAMUX)
tempNum = command[1] & 7 # 7 = 0b111
channel1Number = tempNum if (command[1] & 0xf) > 7 else tempNum+8
channel1Gain = (command[1] >> 4) & 7 # 7 = 0b111
command[2] = int(channel2PGAMUX)
tempNum = command[2] & 7 # 7 = 0b111
channel2Number = tempNum if (command[2] & 0xf) > 7 else tempNum+8
channel2Gain = (command[2] >> 4) & 7 # 7 = 0b111
command[3] = int(channel3PGAMUX)
tempNum = command[3] & 7 # 7 = 0b111
channel3Number = tempNum if (command[3] & 0xf) > 7 else tempNum+8
channel3Gain = (command[3] >> 4) & 7 # 7 = 0b111
# Bit 1: Update IO
# Bit 0: LED State
bf = BitField()
bf.bit1 = int(UpdateIO)
bf.bit0 = int(LEDState)
command[4] = int(bf)
# Bit 7-4: 1100 (Command/Response)
# Bit 3-0: Bits for IO3 through IO0 States
bf.fromByte(0)
bf.bit7 = 1
bf.bit6 = 1
bf.fromByte( int(bf) | int(IO3toIO0States) )
command[5] = int(bf)
command[7] = EchoValue
self.write(command)
results = self.read()
bf = BitField()
bf.fromByte(results[0])
if bf.bit7 != 1 or bf.bit6 != 0:
raise U12Exception("Expected a AIStream response, got %s instead." % results[0])
returnDict = {}
returnDict['EchoValue'] = results[1]
returnDict['PGAOvervoltage'] = bool(bf.bit4)
returnDict['IO3toIO0States'] = BitField(results[0], "IO", range(3, -1, -1), "Low", "High")
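        # Each reading is a 12-bit value: the high nibbles are packed into
        # response bytes 2 and 5, and the corresponding low bytes follow in
        # bytes 3, 4, 6 and 7.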
channel0 = (results[2] >> 4) & 0xf
channel1 = (results[2] & 0xf)
channel2 = (results[5] >> 4) & 0xf
channel3 = (results[5] & 0xf)
channel0 = (channel0 << 8) + results[3]
returnDict['Channel0'] = self.bitsToVolts(channel0Number, channel0Gain, channel0)
channel1 = (channel1 << 8) + results[4]
returnDict['Channel1'] = self.bitsToVolts(channel1Number, channel1Gain, channel1)
channel2 = (channel2 << 8) + results[6]
returnDict['Channel2'] = self.bitsToVolts(channel2Number, channel2Gain, channel2)
channel3 = (channel3 << 8) + results[7]
returnDict['Channel3'] = self.bitsToVolts(channel3Number, channel3Gain, channel3)
return returnDict
def rawDIO(self, D15toD8Directions = 0, D7toD0Directions = 0, D15toD8States = 0, D7toD0States = 0, IO3toIO0DirectionsAndStates = 0, UpdateDigital = False):
"""
Name: U12.rawDIO(D15toD8Directions = 0, D7toD0Directions = 0,
D15toD8States = 0, D7toD0States = 0,
IO3toIO0DirectionsAndStates = 0, UpdateDigital = 1)
Args: D15toD8Directions, A byte where 0 = Output, 1 = Input for D15-8
D7toD0Directions, A byte where 0 = Output, 1 = Input for D7-0
D15toD8States, A byte where 0 = Low, 1 = High for D15-8
D7toD0States, A byte where 0 = Low, 1 = High for D7-0
IO3toIO0DirectionsAndStates, Bits 7-4: Direction, 3-0: State
              UpdateDigital, True if you want to update the IO/D lines; False
                             to just read their values.
Desc: This commands reads the direction and state of all the digital
I/O. See Section 5.2 of the U12 User's Guide.
By default, it just reads the directions and states.
Returns: A dictionary with the following keys:
D15toD8Directions, a BitField representing the directions of D15-D8
D7toD0Directions, a BitField representing the directions of D7-D0.
D15toD8States, a BitField representing the states of D15-D8.
D7toD0States, a BitField representing the states of D7-D0.
IO3toIO0States, a BitField representing the states of IO3-IO0.
D15toD8OutputLatchStates, BitField of output latch states for D15-8
D7toD0OutputLatchStates, BitField of output latch states for D7-0
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawDIO()
{
'D15toD8Directions':
<BitField object: [ D15 = Input (1), D14 = Input (1),
D13 = Input (1), D12 = Input (1),
D11 = Input (1), D10 = Input (1),
D9 = Input (1), D8 = Input (1) ] >,
'D7toD0Directions':
<BitField object: [ D7 = Input (1), D6 = Input (1), D5 = Input (1),
D4 = Input (1), D3 = Input (1), D2 = Input (1),
D1 = Input (1), D0 = Input (1) ] >,
'D15toD8States':
<BitField object: [ D15 = Low (0), D14 = Low (0), D13 = Low (0),
D12 = Low (0), D11 = Low (0), D10 = Low (0),
D9 = Low (0), D8 = Low (0) ] >,
'D7toD0States':
<BitField object: [ D7 = Low (0), D6 = Low (0), D5 = Low (0),
D4 = Low (0), D3 = Low (0), D2 = Low (0),
D1 = Low (0), D0 = Low (0) ] >,
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0), IO1 = Low (0),
IO0 = Low (0) ] >,
'D15toD8OutputLatchStates':
<BitField object: [ D15 = 0 (0), D14 = 0 (0), D13 = 0 (0),
D12 = 0 (0), D11 = 0 (0), D10 = 0 (0),
D9 = 0 (0), D8 = 0 (0) ] >,
'D7toD0OutputLatchStates':
<BitField object: [ D7 = 0 (0), D6 = 0 (0), D5 = 0 (0), D4 = 0 (0),
D3 = 0 (0), D2 = 0 (0), D1 = 0 (0),
D0 = 0 (0) ] >
}
"""
command = [ 0 ] * 8
# Bits for D15 through D8 Direction
command[0] = int(D15toD8Directions)
# Bits for D7 through D0 Direction ( 0 = Output, 1 = Input)
command[1] = int(D7toD0Directions)
# Bits for D15 through D8 State ( 0 = Low, 1 = High)
command[2] = int(D15toD8States)
# Bits for D7 through D0 State ( 0 = Low, 1 = High)
command[3] = int(D7toD0States)
# Bits 7-4: Bits for IO3 through IO0 Direction
# Bits 3-0: Bits for IO3 through IO0 State
command[4] = int(IO3toIO0DirectionsAndStates)
# 01X10111 (DIO)
command[5] = 0x57 # 0b01010111
# Bit 0: Update Digital
command[6] = int(bool(UpdateDigital))
#XXXXXXXX
# command[7] = XXXXXXXX
self.write(command)
results = self.read()
returnDict = {}
if results[0] != 87:
raise U12Exception("Expected a DIO response, got %s instead." % results[0])
returnDict['D15toD8States'] = BitField(results[1], "D", range(15, 7, -1), "Low", "High")
returnDict['D7toD0States'] = BitField(results[2], "D", range(7, -1, -1), "Low", "High")
returnDict['D15toD8Directions'] = BitField(results[4], "D", range(15, 7, -1), "Output", "Input")
returnDict['D7toD0Directions'] = BitField(results[5], "D", range(7, -1, -1), "Output", "Input")
returnDict['D15toD8OutputLatchStates'] = BitField(results[6], "D", range(15, 7, -1))
returnDict['D7toD0OutputLatchStates'] = BitField(results[7], "D", range(7, -1, -1))
returnDict['IO3toIO0States'] = BitField((results[3] >> 4), "IO", range(3, -1, -1), "Low", "High")
return returnDict
def rawCounter(self, StrobeEnabled = False, ResetCounter = False):
"""
Name: U12.rawCounter(StrobeEnabled = False, ResetCounter = False)
Args: StrobeEnable, set to True to enable strobe.
ResetCounter, set to True to reset the counter AFTER reading.
Desc: This command controls and reads the 32-bit counter. See
Section 5.3 of the User's Guide.
Returns: A dictionary with the following keys:
D15toD8States, a BitField representing the states of D15-D8.
D7toD0States, a BitField representing the states of D7-D0.
IO3toIO0States, a BitField representing the states of IO3-IO0.
Counter, the value of the counter
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawCounter()
{
'D15toD8States':
<BitField object: [ D15 = Low (0), D14 = Low (0), D13 = Low (0),
D12 = Low (0), D11 = Low (0), D10 = Low (0),
D9 = Low (0), D8 = Low (0) ] >,
'D7toD0States':
<BitField object: [ D7 = Low (0), D6 = Low (0), D5 = Low (0),
D4 = Low (0), D3 = Low (0), D2 = Low (0),
D1 = Low (0), D0 = Low (0) ] >,
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0), IO1 = Low (0),
IO0 = Low (0) ] >,
'Counter': 0
}
"""
command = [ 0 ] * 8
bf = BitField()
bf.bit1 = int(StrobeEnabled)
bf.bit0 = int(ResetCounter)
command[0] = int(bf)
bf.fromByte(0)
bf.bit6 = 1
bf.bit4 = 1
bf.bit1 = 1
command[5] = int(bf)
self.write(command)
results = self.read()
returnDict = {}
if results[0] != command[5]:
raise U12Exception("Expected a Counter response, got %s instead." % results[0])
returnDict['D15toD8States'] = BitField(results[1], "D", range(15, 7, -1), "Low", "High")
returnDict['D7toD0States'] = BitField(results[2], "D", range(7, -1, -1), "Low", "High")
returnDict['IO3toIO0States'] = BitField((results[3] >> 4), "IO", range(3, -1, -1), "Low", "High")
counter = results[7]
counter += results[6] << 8
counter += results[5] << 16
counter += results[4] << 24
returnDict['Counter'] = counter
return returnDict
def rawCounterPWMDIO(self, D15toD8Directions = 0, D7toD0Directions = 0, D15toD8States = 0, D7toD0States = 0, IO3toIO0DirectionsAndStates = 0, ResetCounter = False, UpdateDigital = 0, PWMA = 0, PWMB = 0):
"""
Name: U12.rawCounterPWMDIO( D15toD8Directions = 0, D7toD0Directions = 0,
D15toD8States = 0, D7toD0States = 0,
IO3toIO0DirectionsAndStates = 0,
ResetCounter = False, UpdateDigital = 0,
PWMA = 0, PWMB = 0)
Args: D15toD8Directions, A byte where 0 = Output, 1 = Input for D15-8
D7toD0Directions, A byte where 0 = Output, 1 = Input for D7-0
D15toD8States, A byte where 0 = Low, 1 = High for D15-8
D7toD0States, A byte where 0 = Low, 1 = High for D7-0
IO3toIO0DirectionsAndStates, Bits 7-4: Direction, 3-0: State
ResetCounter, If True, reset the counter after reading.
              UpdateDigital, True if you want to update the IO/D lines; False
                             to just read their values.
PWMA, Voltage to set AO0 to output.
PWMB, Voltage to set AO1 to output.
Desc: This command controls all 20 digital I/O, and the 2 PWM outputs.
The response provides the state of all I/O and the current count.
See Section 5.4 of the User's Guide.
By default, sets the AOs to 0 and reads the states and counters.
Returns: A dictionary with the following keys:
D15toD8States, a BitField representing the states of D15-D8.
D7toD0States, a BitField representing the states of D7-D0.
IO3toIO0States, a BitField representing the states of IO3-IO0.
Counter, the value of the counter
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawCounterPWMDIO()
{
'D15toD8States':
<BitField object: [ D15 = Low (0), D14 = Low (0), D13 = Low (0),
D12 = Low (0), D11 = Low (0), D10 = Low (0),
D9 = Low (0), D8 = Low (0) ] >,
'D7toD0States':
<BitField object: [ D7 = Low (0), D6 = Low (0), D5 = Low (0),
D4 = Low (0), D3 = Low (0), D2 = Low (0),
D1 = Low (0), D0 = Low (0) ] >,
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0),
IO1 = Low (0), IO0 = Low (0) ] >,
'Counter': 0
}
"""
command = [ 0 ] * 8
# Bits for D15 through D8 Direction
command[0] = int(D15toD8Directions)
# Bits for D7 through D0 Direction ( 0 = Output, 1 = Input)
command[1] = int(D7toD0Directions)
# Bits for D15 through D8 State ( 0 = Low, 1 = High)
command[2] = int(D15toD8States)
# Bits for D7 through D0 State ( 0 = Low, 1 = High)
command[3] = int(D7toD0States)
# Bits 7-4: Bits for IO3 through IO0 Direction
# Bits 3-0: Bits for IO3 through IO0 State
command[4] = int(IO3toIO0DirectionsAndStates)
bf = BitField()
bf.bit5 = int(ResetCounter)
bf.bit4 = int(UpdateDigital)
binPWMA = int((1023 * (float(PWMA)/5.0)))
binPWMB = int((1023 * (float(PWMB)/5.0)))
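        # For example, PWMA = 2.5 V maps to binPWMA = 511. The low 2 bits of
        # each 10-bit value are packed into byte 5 below; the high 8 bits of
        # PWMA and PWMB go into bytes 6 and 7 respectively.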
bf2 = BitField()
bf2.fromByte( binPWMA & 3 ) # 3 = 0b11
bf.bit3 = bf2.bit1
bf.bit2 = bf2.bit0
bf2.fromByte( binPWMB & 3 ) # 3 = 0b11
bf.bit1 = bf2.bit1
bf.bit0 = bf2.bit0
command[5] = int(bf)
command[6] = (binPWMA >> 2) & 0xff
command[7] = (binPWMB >> 2) & 0xff
self.write(command)
results = self.read()
returnDict = {}
returnDict['D15toD8States'] = BitField(results[1], "D", range(15, 7, -1), "Low", "High")
returnDict['D7toD0States'] = BitField(results[2], "D", range(7, -1, -1), "Low", "High")
returnDict['IO3toIO0States'] = BitField((results[3] >> 4), "IO", range(3, -1, -1), "Low", "High")
counter = results[7]
counter += results[6] << 8
counter += results[5] << 16
counter += results[4] << 24
returnDict['Counter'] = counter
return returnDict
def rawAIBurst(self, channel0PGAMUX = 8, channel1PGAMUX = 9, channel2PGAMUX = 10, channel3PGAMUX = 11, NumberOfScans = 8, TriggerIONum = 0, TriggerState = 0, UpdateIO = False, LEDState = True, IO3ToIO0States = 0, FeatureReports = False, TriggerOn = False, SampleInterval = 15000):
"""
Name: U12.rawAIBurst( channel0PGAMUX = 8, channel1PGAMUX = 9,
channel2PGAMUX = 10, channel3PGAMUX = 11,
NumberOfScans = 8, TriggerIONum = 0,
TriggerState = 0, UpdateIO = False,
LEDState = True, IO3ToIO0States = 0,
FeatureReports = False, TriggerOn = False,
SampleInterval = 15000 )
Args: channel0PGAMUX, A byte that contains channel0 information
channel1PGAMUX, A byte that contains channel1 information
channel2PGAMUX, A byte that contains channel2 information
channel3PGAMUX, A byte that contains channel3 information
NumberOfScans, The number of scans you wish to take. Rounded up
to a power of 2.
TriggerIONum, IO to trigger burst on.
TriggerState, State to trigger on.
              UpdateIO, True if you want to update the IO/D lines; False to
                        just read their values.
LEDState, Turns the status LED on or off.
IO3ToIO0States, 4 bits for IO3-0 states
FeatureReports, Use feature reports, or not.
TriggerOn, Use trigger to start acquisition.
SampleInterval, = int(6000000.0/(ScanRate * NumberOfChannels))
must be greater than (or equal to) 733.
Desc: After receiving a AIBurst command, the LabJack collects 4
channels at the specified data rate, and puts data in the buffer.
This continues until the buffer is full, at which time the
LabJack starts sending the data to the host. Data is sent to the
host 1 scan at a time while checking for a command from the host.
If a command is received the burst operation is canceled and the
command is executed normally. If the LED is enabled, it blinks at
4 Hz while waiting for a trigger, is off during acquisition,
blinks at about 8 Hz during data delivery, and is set on when
done or stopped. See Section 5.5 of the User's Guide.
This function sends the AIBurst command, then reads all the
responses. Separating the write and read is not currently
supported (like in the UW driver).
By default, it does single-ended readings on AI0-4 at 100Hz for 8
scans.
Returns: A dictionary with the following keys:
Channel0-3, A list of the readings on the channels
PGAOvervoltages, A list of the over-voltage flags
IO3toIO0State, A list of the IO states
IterationCounters, A list of the values of the iteration counter
Backlogs, value*256 = number of packets in the backlog.
BufferOverflowOrChecksumErrors, If True and Backlog = 31,
then a buffer overflow occurred. If
True and Backlog = 0, then Checksum
error occurred.
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawAIBurst()
{
'Channel0': [1.484375, 1.513671875, ... , 1.46484375],
'Channel1': [1.455078125, 1.455078125, ... , 1.455078125],
'Channel2': [1.46484375, 1.474609375, ... , 1.46484375],
'Channel3': [1.435546875, 1.42578125, ... , 1.435546875],
'PGAOvervoltages': [False, False, ..., False],
'IO3toIO0States':
[<BitField object: [ IO3 = Low (0), IO2 = Low (0), IO1 = Low (0),
IO0 = Low (0) ] >, ... ],
'IterationCounters': [0, 1, 2, 3, 4, 5, 6, 0],
'Backlogs': [0, 0, 0, 0, 0, 0, 0, 0],
'BufferOverflowOrChecksumErrors': [False, False, ... , False]
}
"""
command = [ 0 ] * 8
# Bits 6-4: PGA for 1st Channel
# Bits 3-0: MUX command for 1st Channel
command[0] = int(channel0PGAMUX)
tempNum = command[0] & 7 # 7 = 0b111
channel0Number = tempNum if (command[0] & 0xf) > 7 else tempNum+8
channel0Gain = (command[0] >> 4) & 7 # 7 = 0b111
command[1] = int(channel1PGAMUX)
tempNum = command[1] & 7 # 7 = 0b111
channel1Number = tempNum if (command[1] & 0xf) > 7 else tempNum+8
channel1Gain = (command[1] >> 4) & 7 # 7 = 0b111
command[2] = int(channel2PGAMUX)
tempNum = command[2] & 7 # 7 = 0b111
channel2Number = tempNum if (command[2] & 0xf) > 7 else tempNum+8
channel2Gain = (command[2] >> 4) & 7 # 7 = 0b111
command[3] = int(channel3PGAMUX)
tempNum = command[3] & 7 # 7 = 0b111
channel3Number = tempNum if (command[3] & 0xf) > 7 else tempNum+8
channel3Gain = (command[3] >> 4) & 7 # 7 = 0b111
if NumberOfScans > 1024 or NumberOfScans < 8:
raise U12Exception("The number of scans must be between 1024 and 8 (inclusive)")
NumScansExponentMod = 10 - int(math.ceil(math.log(NumberOfScans, 2)))
NumScans = 2 ** (10 - NumScansExponentMod)
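        # NumberOfScans is rounded up to the next power of two; for example,
        # NumberOfScans = 100 results in NumScans = 128.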
bf = BitField( rawByte = (NumScansExponentMod << 5) )
# bits 4-3: IO to Trigger on
bf.bit2 = 0
bf.bit1 = int(bool(UpdateIO))
bf.bit0 = int(bool(LEDState))
command[4] = int(bf)
bf2 = BitField(rawByte = int(IO3ToIO0States))
#Bits 7-4: 1010 (Start Burst)
bf2.bit7 = 1
bf2.bit5 = 1
command[5] = int(bf2)
if SampleInterval < 733:
raise U12Exception("SampleInterval must be greater than 733.")
bf3 = BitField( rawByte = ((SampleInterval >> 8) & 0xf) )
bf3.bit7 = int(bool(FeatureReports))
bf3.bit6 = int(bool(TriggerOn))
command[6] = int(bf3)
command[7] = SampleInterval & 0xff
self.write(command)
resultsList = []
for i in range(NumScans):
resultsList.append(self.read())
returnDict = {}
returnDict['BufferOverflowOrChecksumErrors'] = list()
returnDict['PGAOvervoltages'] = list()
returnDict['IO3toIO0States'] = list()
returnDict['IterationCounters'] = list()
returnDict['Backlogs'] = list()
returnDict['Channel0'] = list()
returnDict['Channel1'] = list()
returnDict['Channel2'] = list()
returnDict['Channel3'] = list()
for results in resultsList:
bf = BitField(rawByte = results[0])
if bf.bit7 != 1 or bf.bit6 != 0:
raise U12Exception("Expected a AIBurst response, got %s instead." % results[0])
returnDict['BufferOverflowOrChecksumErrors'].append(bool(bf.bit5))
returnDict['PGAOvervoltages'].append(bool(bf.bit4))
returnDict['IO3toIO0States'].append(BitField(results[0], "IO", range(3, -1, -1), "Low", "High"))
returnDict['IterationCounters'].append((results[1] >> 5))
returnDict['Backlogs'].append(results[1] & 0xf)
channel0 = (results[2] >> 4) & 0xf
channel1 = (results[2] & 0xf)
channel2 = (results[5] >> 4) & 0xf
channel3 = (results[5] & 0xf)
channel0 = (channel0 << 8) + results[3]
returnDict['Channel0'].append(self.bitsToVolts(channel0Number, channel0Gain, channel0))
channel1 = (channel1 << 8) + results[4]
returnDict['Channel1'].append(self.bitsToVolts(channel1Number, channel1Gain, channel1))
channel2 = (channel2 << 8) + results[6]
returnDict['Channel2'].append(self.bitsToVolts(channel2Number, channel2Gain, channel2))
channel3 = (channel3 << 8) + results[7]
returnDict['Channel3'].append(self.bitsToVolts(channel3Number, channel3Gain, channel3))
return returnDict
def rawAIContinuous(self, channel0PGAMUX = 8, channel1PGAMUX = 9, channel2PGAMUX = 10, channel3PGAMUX = 11, FeatureReports = False, CounterRead = False, UpdateIO = False, LEDState = True, IO3ToIO0States = 0, SampleInterval = 15000):
"""
Currently in development.
The function is mostly implemented, but is currently too slow to be
useful.
"""
command = [ 0 ] * 8
# Bits 6-4: PGA for 1st Channel
# Bits 3-0: MUX command for 1st Channel
command[0] = int(channel0PGAMUX)
tempNum = command[0] & 7 # 7 = 0b111
channel0Number = tempNum if (command[0] & 0xf) > 7 else tempNum+8
channel0Gain = (command[0] >> 4) & 7 # 7 = 0b111
command[1] = int(channel1PGAMUX)
tempNum = command[1] & 7 # 7 = 0b111
channel1Number = tempNum if (command[1] & 0xf) > 7 else tempNum+8
channel1Gain = (command[1] >> 4) & 7 # 7 = 0b111
command[2] = int(channel2PGAMUX)
tempNum = command[2] & 7 # 7 = 0b111
channel2Number = tempNum if (command[2] & 0xf) > 7 else tempNum+8
channel2Gain = (command[2] >> 4) & 7 # 7 = 0b111
command[3] = int(channel3PGAMUX)
tempNum = command[3] & 7 # 7 = 0b111
channel3Number = tempNum if (command[3] & 0xf) > 7 else tempNum+8
channel3Gain = (command[3] >> 4) & 7 # 7 = 0b111
bf = BitField()
bf.bit7 = int(bool(FeatureReports))
bf.bit6 = int(bool(CounterRead))
bf.bit1 = int(bool(UpdateIO))
bf.bit0 = int(bool(LEDState))
command[4] = int(bf)
# Bits 7-4: 1001 (Start Continuous)
bf2 = BitField( rawByte = int(IO3ToIO0States) )
bf2.bit7 = 1
bf2.bit4 = 1
command[5] = int(bf2)
command[6] = ( SampleInterval >> 8)
command[7] = SampleInterval & 0xff
byte0bf = BitField()
returnDict = dict()
self.write(command)
while True:
results = self.read()
byte0bf.fromByte(results[0])
returnDict['Byte0'] = byte0bf
returnDict['IterationCounter'] = (results[1] >> 5)
returnDict['Backlog'] = results[1] & 0xf
yield returnDict
def rawPulseout(self, B1 = 10, C1 = 2, B2 = 10, C2 = 2, D7ToD0PulseSelection = 1, ClearFirst = False, NumberOfPulses = 5):
"""
Name: U12.rawPulseout( B1 = 10, C1 = 2, B2 = 10, C2 = 2,
D7ToD0PulseSelection = 1, ClearFirst = False,
NumberOfPulses = 5)
Args: B1, the B component of the first half cycle
C1, the C component of the first half cycle
B2, the B component of the second half cycle
C2, the C component of the second half cycle
D7ToD0PulseSelection, which D lines to pulse.
ClearFirst, True = Start Low.
NumberOfPulses, the number of pulses
Desc: This command creates pulses on any, or all, of D0-D7. The desired
D lines must be set to output with some other function. See
Section 5.7 of the User's Guide.
By default, pulses D0 5 times at 400us high, then 400 us low.
Returns: None
Example:
Have a jumper wire connected from D0 to CNT.
>>> import u12
>>> d = u12.U12()
>>> d.rawDIO(D7toD0Directions = 0, UpdateDigital = True)
>>> d.rawCounter(ResetCounter = True)
>>> d.rawPulseout(ClearFirst = True)
>>> print d.rawCounter()
{ 'IO3toIO0States': ... ,
'Counter': 5,
'D7toD0States': ... ,
'D15toD8States': ...
}
"""
command = [ 0 ] * 8
command[0] = B1
command[1] = C1
command[2] = B2
command[3] = C2
command[4] = int(D7ToD0PulseSelection)
# 01100100 (Pulseout)
bf = BitField()
bf.bit6 = 1
bf.bit5 = 1
bf.bit2 = 1
command[5] = int(bf)
bf2 = BitField( rawByte = ( NumberOfPulses >> 8 ) )
bf2.bit7 = int(bool(ClearFirst))
command[6] = int(bf2)
command[7] = NumberOfPulses & 0xff
self.write(command)
results = self.read()
if command[5] != results[5]:
raise U12Exception("Expected Pulseout response, got %s instead." % results[5])
if results[4] != 0:
errors = BitField(rawByte = command[4], labelPrefix = "D", zeroLabel = "Ok", oneLabel = "Error")
raise U12Exception("D7-D0 Direction error detected: %s" % errors)
return None
def rawReset(self):
"""
Name: U12.rawReset()
        Desc: Causes the device to sit in an infinite loop until the
              microcontroller's watchdog times out, after about 2 seconds.
              See Section 5.8 of the User's Guide.
Note: The function will close the device after it has written the
command.
Returns: None
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawReset()
"""
command = [ 0 ] * 8
# 0b01011111 ( Reset )
bf = BitField()
bf.bit6 = 1
bf.bit4 = 1
bf.bit3 = 1
bf.bit2 = 1
bf.bit1 = 1
bf.bit0 = 1
command[5] = int(bf)
self.write(command)
self.close()
def rawReenumerate(self):
"""
Name: U12.rawReenumerate()
Desc: Detaches from the USB, reloads config parameters, and then
reattaches so the device can be re-enumerated. See Section 5.9 of
the User's Guide.
Note: The function will close the device after it has written the
command.
Returns: None
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawReenumerate()
"""
command = [ 0 ] * 8
# 0b01000000 (Re-Enumerate)
bf = BitField()
bf.bit6 = 1
command[5] = int(bf)
self.write(command)
self.close()
def rawWatchdog(self, IgnoreCommands = False, D0Active = False, D0State = False, D1Active = False, D1State = False, D8Active = False, D8State = False, ResetOnTimeout = False, WatchdogActive = False, Timeout = 60):
"""
Name: U12.rawWatchdog( IgnoreCommands = False, D0Active = False,
D0State = False, D1Active = False,
D1State = False, D8Active = False,
D8State = False, ResetOnTimeout = False,
WatchdogActive = False, Timeout = 60)
Desc: Sets the settings for the watchdog, or just reads the firmware
version of the U12. See section 5.10 of the User's Guide.
By defaults, just reads the firmware version.
Returns: A dictionary with the following keys:
FirmwareVersion, the firmware version of the U12.
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawWatchdog()
{'FirmwareVersion': '1.10'}
"""
command = [ 0 ] * 8
command[0] = int(bool(IgnoreCommands))
bf = BitField()
bf.bit7 = int(D0Active)
bf.bit6 = int(D0State)
bf.bit5 = int(D1Active)
bf.bit4 = int(D1State)
bf.bit3 = int(D8Active)
bf.bit2 = int(D8State)
bf.bit1 = int(ResetOnTimeout)
bf.bit0 = int(WatchdogActive)
command[4] = int(bf)
# 01X1X011 (Watchdog)
bf2 = BitField()
bf2.bit6 = 1
bf2.bit4 = 1
bf2.bit1 = 1
bf2.bit0 = 1
command[5] = int(bf2)
# Timeout is increments of 2^16 cycles.
# 2^16 cycles is about 0.01 seconds.
binTimeout = int((float(Timeout) / 0.01))
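        # For example, Timeout = 60 seconds gives binTimeout = 6000, which is
        # split across bytes 6 and 7 below.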
command[6] = ( binTimeout >> 8 ) & 0xff
command[7] = binTimeout & 0xff
self.write(command)
results = self.read()
returnDict = dict()
returnDict['FirmwareVersion'] = "%s.%.2d" % (results[0], results[1])
return returnDict
def rawReadRAM(self, Address = 0):
"""
Name: U12.rawReadRAM(Address = 0)
Args: Address, the starting address to read from
Desc: Reads 4 bytes out of the U12's internal memory. See section 5.11
of the User's Guide.
By default, reads the bytes that make up the serial number.
Returns: A dictionary with the following keys:
DataByte0, the data byte at Address - 0
DataByte1, the data byte at Address - 1
DataByte2, the data byte at Address - 2
DataByte3, the data byte at Address - 3
Example:
>>> import u12, struct
>>> d = u12.U12()
>>> r = d.rawReadRAM()
>>> print r
{'DataByte3': 5, 'DataByte2': 246, 'DataByte1': 139, 'DataByte0': 170}
>>> bytes = [ r['DataByte3'], r['DataByte2'], r['DataByte1'], r['DataByte0'] ]
>>> print struct.unpack(">I", struct.pack("BBBB", *bytes))[0]
100043690
"""
command = [ 0 ] * 8
# 01010000 (Read RAM)
bf = BitField()
bf.bit6 = 1
bf.bit4 = 1
command[5] = int(bf)
command[6] = (Address >> 8) & 0xff
command[7] = Address & 0xff
self.write(command)
results = self.read()
if results[0] != int(bf):
raise U12Exception("Expected ReadRAM response, got %s" % results[0])
if (results[6] != command[6]) or (results[7] != command[7]):
receivedAddress = (results[6] << 8) + results[7]
raise U12Exception("Wanted address %s got address %s" % (Address, receivedAddress))
returnDict = dict()
returnDict['DataByte3'] = results[1]
returnDict['DataByte2'] = results[2]
returnDict['DataByte1'] = results[3]
returnDict['DataByte0'] = results[4]
return returnDict
def rawWriteRAM(self, Data, Address):
"""
Name: U12.rawWriteRAM(Data, Address)
Args: Data, a list of 4 bytes to write to memory.
Address, the starting address to write to.
Desc: Writes 4 bytes to the U12's internal memory. See section 5.13 of
the User's Guide.
No default behavior, you must pass Data and Address.
Returns: A dictionary with the following keys:
DataByte0, the data byte at Address - 0
DataByte1, the data byte at Address - 1
DataByte2, the data byte at Address - 2
DataByte3, the data byte at Address - 3
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawWriteRAM([1, 2, 3, 4], 0x200)
{'DataByte3': 4, 'DataByte2': 3, 'DataByte1': 2, 'DataByte0': 1}
"""
command = [ 0 ] * 8
if not isinstance(Data, list) or len(Data) > 4:
raise U12Exception("Data wasn't a list, or was too long.")
Data.reverse()
command[:len(Data)] = Data
# 01010001 (Write RAM)
bf = BitField()
bf.bit6 = 1
bf.bit4 = 1
bf.bit0 = 1
command[5] = int(bf)
command[6] = (Address >> 8) & 0xff
command[7] = Address & 0xff
self.write(command)
results = self.read()
if results[0] != int(bf):
raise U12Exception("Expected ReadRAM response, got %s" % results[0])
if (results[6] != command[6]) or (results[7] != command[7]):
receivedAddress = (results[6] << 8) + results[7]
raise U12Exception("Wanted address %s got address %s" % (Address, receivedAddress))
returnDict = dict()
returnDict['DataByte3'] = results[1]
returnDict['DataByte2'] = results[2]
returnDict['DataByte1'] = results[3]
returnDict['DataByte0'] = results[4]
return returnDict
def rawAsynch(self, Data, AddDelay = False, TimeoutActive = False, SetTransmitEnable = False, PortB = False, NumberOfBytesToWrite = 0, NumberOfBytesToRead = 0):
"""
Name: U12.rawAsynch(Data, AddDelay = False, TimeoutActive = False,
SetTransmitEnable = False, PortB = False,
NumberOfBytesToWrite = 0, NumberOfBytesToRead = 0)
Args: Data, A list of bytes to write.
AddDelay, True to add a 1 bit delay between each transmit byte.
TimeoutActive, True to enable timeout for the receive phase.
SetTransmitEnable, True to set Transmit Enable to high during
transmit and low during receive.
PortB, True to use PortB instead of PortA.
NumberOfBytesToWrite, Number of bytes to write.
NumberOfBytesToRead, Number of bytes to read.
Desc: Requires firmware V1.1 or higher. This function writes and then
reads half-duplex asynchronous data on 1 of two pairs of D lines.
See section 5.13 of the User's Guide.
Returns: A dictionary with the following keys,
DataByte0-3, the first four data bytes read over the RX line
ErrorFlags, a BitField representing the error flags.
Example:
>>> import u12
>>> d = u12.U12()
>>> # Set the full and half A,B,C to 9600
>>> d.rawWriteRAM([0, 1, 1, 200], 0x073)
>>> d.rawWriteRAM([5, 1, 2, 48], 0x076)
>>> print d.rawAsynch([1, 2, 3, 4], NumberOfBytesToWrite = 4, NumberOfBytesToRead = 4)
{
'DataByte3': 4,
'DataByte2': 3,
'DataByte1': 2,
'DataByte0': 1,
'ErrorFlags': <BitField object: [ Timeout Error Flag = 0 (0), ... ] >
}
"""
command = [ 0 ] * 8
if not isinstance(Data, list) or len(Data) > 4:
raise U12Exception("Data wasn't a list, or was too long.")
        NumberOfBytesToWrite = NumberOfBytesToWrite & 0xff
NumberOfBytesToRead = NumberOfBytesToRead & 0xff
if NumberOfBytesToWrite > 18:
raise U12Exception("Can only write 18 or fewer bytes at a time.")
if NumberOfBytesToRead > 18:
raise U12Exception("Can only read 18 or fewer bytes at a time.")
Data.reverse()
command[:len(Data)] = Data
bf = BitField()
bf.bit3 = int(bool(AddDelay))
bf.bit2 = int(bool(TimeoutActive))
bf.bit1 = int(bool(SetTransmitEnable))
bf.bit0 = int(bool(PortB))
command[4] = int(bf)
#01100001 (Asynch)
bf2 = BitField()
bf2.bit6 = 1
bf2.bit5 = 1
bf2.bit0 = 1
command[5] = int(bf2)
command[6] = NumberOfBytesToWrite
command[7] = NumberOfBytesToRead
self.write(command)
results = self.read()
if command[5] != results[5]:
raise U12Exception("Expected Asynch response, got %s instead." % results[5])
returnDict = dict()
returnDict['DataByte3'] = results[0]
returnDict['DataByte2'] = results[1]
returnDict['DataByte1'] = results[2]
returnDict['DataByte0'] = results[3]
bfLabels = ["Timeout Error Flag", "STRT Error Flag", "FRM Error Flag", "RXTris Error Flag", "TETris Error Flag", "TXTris Error Flag"]
bf = BitField( rawByte = results[4], labelPrefix = "", labelList = bfLabels )
returnDict["ErrorFlags"] = bf
return returnDict
SPIModes = ['A', 'B', 'C', 'D']
def rawSPI(self, Data, AddMsDelay = False, AddHundredUsDelay = False, SPIMode = 'A', NumberOfBytesToWriteRead = 0, ControlCS = False, StateOfActiveCS = False, CSLineNumber = 0):
"""
Name: U12.rawSPI( Data, AddMsDelay = False, AddHundredUsDelay = False,
SPIMode = 'A', NumberOfBytesToWriteRead = 0,
ControlCS = False, StateOfActiveCS = False,
CSLineNumber = 0)
Args: Data, A list of four bytes to write using SPI
AddMsDelay, If True, a 1 ms delay is added between each bit
AddHundredUsDelay, if True, 100us delay is added
SPIMode, 'A', 'B', 'C', or 'D'
NumberOfBytesToWriteRead, number of bytes to write and read.
ControlCS, D0-D7 is automatically controlled as CS. The state and
direction of CS is only tested if control is enabled.
StateOfActiveCS, Active state for CS line.
CSLineNumber, D line to use as CS if enabled (0-7).
Desc: This function performs SPI communication. See Section 5.14 of the
User's Guide.
Returns: A dictionary with the following keys,
DataByte0-3, the first four data bytes read
ErrorFlags, a BitField representing the error flags.
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawSPI([1,2,3,4], NumberOfBytesToWriteRead = 4)
{
'DataByte3': 4,
'DataByte2': 3,
'DataByte1': 2,
'DataByte0': 1,
'ErrorFlags':
<BitField object: [ CSStateTris Error Flag = 0 (0), ... ] >
}
"""
command = [ 0 ] * 8
if not isinstance(Data, list) or len(Data) > 4:
raise U12Exception("Data wasn't a list, or was too long.")
NumberOfBytesToWriteRead = NumberOfBytesToWriteRead & 0xff
if NumberOfBytesToWriteRead == 0:
NumberOfBytesToWriteRead = len(Data)
if NumberOfBytesToWriteRead > 18 or NumberOfBytesToWriteRead < 1:
raise U12Exception("Can only read/write 1 to 18 bytes at a time.")
Data.reverse()
command[:len(Data)] = Data
bf = BitField()
bf.bit7 = int(bool(AddMsDelay))
bf.bit6 = int(bool(AddHundredUsDelay))
modeIndex = self.SPIModes.index(SPIMode)
bf[7-modeIndex] = 1
command[4] = int(bf)
# 01100010 (SPI)
bf2 = BitField()
bf2.bit6 = 1
bf2.bit5 = 1
bf2.bit1 = 1
command[5] = int(bf2)
command[6] = NumberOfBytesToWriteRead
bf3 = BitField(rawByte = CSLineNumber)
bf3.bit7 = int(bool(ControlCS))
bf3.bit6 = int(bool(StateOfActiveCS))
command[7] = int(bf3)
self.write(command)
results = self.read()
if results[5] != command[5]:
raise U12Exception("Expected SPI response, got %s instead." % results[5])
returnDict = dict()
returnDict['DataByte3'] = results[0]
returnDict['DataByte2'] = results[1]
returnDict['DataByte1'] = results[2]
returnDict['DataByte0'] = results[3]
bfLabels = ["CSStateTris Error Flag", "SCKTris Error Flag", "MISOTris Error Flag", "MOSITris Error Flag"]
bf = BitField( rawByte = results[4], labelPrefix = "", labelList = bfLabels )
returnDict["ErrorFlags"] = bf
return returnDict
def rawSHT1X(self, Data = [3,0,0,0], WaitForMeasurementReady = True, IssueSerialReset = False, Add1MsDelay = False, Add300UsDelay = False, IO3State = 1, IO2State = 1, IO3Direction = 1, IO2Direction = 1, NumberOfBytesToWrite = 1, NumberOfBytesToRead = 3):
"""
Name: U12.rawSHT1X( Data = [3, 0, 0, 0],
WaitForMeasurementReady = True,
IssueSerialReset = False, Add1MsDelay = False,
Add300UsDelay = False, IO3State = 1, IO2State = 1,
IO3Direction = 1, IO2Direction = 1,
NumberOfBytesToWrite = 1, NumberOfBytesToRead = 3)
Args: Data, a list of bytes to write to the SHT.
WaitForMeasurementReady, Wait for the measurement ready signal.
IssueSerialReset, perform a serial reset
Add1MsDelay, adds 1ms delay
Add300UsDelay, adds a 300us delay
IO3State, sets the state of IO3
IO2State, sets the state of IO2
IO3Direction, sets the direction of IO3 ( 1 = Output )
IO2Direction, sets the direction of IO3 ( 1 = Output )
NumberOfBytesToWrite, how many bytes to write
NumberOfBytesToRead, how may bytes to read back
Desc: Sends and receives data from a SHT1X T/RH sensor from Sensirion.
See Section 5.15 of the User's Guide.
By default, reads the temperature from the SHT.
Returns: A dictionary with the following keys,
DataByte0-3, the four data bytes read
ErrorFlags, a BitField representing the error flags.
Example:
Uses an EI-1050 Temp/Humidity probe wired as follows:
Data ( Green ) -> IO0
Clock ( White ) -> IO1
Ground ( Black ) -> GND
Power ( Red ) -> +5V
Enable ( Brown ) -> IO2
>>> import u12
>>> d = u12.U12()
>>> results = d.rawSHT1X()
>>> print results
{
'DataByte3': 0,
'DataByte2': 69,
'DataByte1': 48,
'DataByte0': 25,
'ErrorFlags':
<BitField object: [ Serial Reset Error Flag = 0 (0), ... ] >
}
>>> tempC = (results['DataByte0'] * 256 ) + results['DataByte1']
>>> tempC = (tempC * 0.01) - 40
>>> print tempC
24.48
>>> results = d.rawSHT1X(Data = [5,0,0,0])
>>> print results
{
'DataByte3': 0,
'DataByte2': 200,
'DataByte1': 90,
'DataByte0': 2,
'ErrorFlags':
<BitField object: [ Serial Reset Error Flag = 0 (0), ... ] >
}
>>> sorh = (results['DataByte0'] * 256 ) + results['DataByte1']
>>> rhlinear = (-0.0000028*sorh*sorh)+(0.0405*sorh)-4.0
>>> rh = ((tempC-25.0)*(0.01+(0.00008*sorh)))+rhlinear
>>> print rh
19.3360256
"""
command = [ 0 ] * 8
if NumberOfBytesToWrite != 0:
if not isinstance(Data, list) or len(Data) > 4:
raise U12Exception("Data wasn't a list, or was too long.")
Data.reverse()
command[:len(Data)] = Data
if max(NumberOfBytesToWrite, NumberOfBytesToRead) > 4:
raise U12Exception("Can only read/write up to 4 bytes at a time.")
bf = BitField()
bf.bit7 = int(bool(WaitForMeasurementReady))
bf.bit6 = int(bool(IssueSerialReset))
bf.bit5 = int(bool(Add1MsDelay))
bf.bit4 = int(bool(Add300UsDelay))
bf.bit3 = int(bool(IO3State))
bf.bit2 = int(bool(IO2State))
bf.bit1 = int(bool(IO3Direction))
bf.bit0 = int(bool(IO2Direction))
command[4] = int(bf)
# 01101000 (SHT1X)
bf2 = BitField()
bf2.bit6 = 1
bf2.bit5 = 1
bf2.bit3 = 1
command[5] = int(bf2)
command[6] = NumberOfBytesToWrite
command[7] = NumberOfBytesToRead
self.write(command)
results = self.read()
if results[5] != command[5]:
raise U12Exception("Expected SHT1x response, got %s instead." % results[5])
returnDict = dict()
returnDict['DataByte3'] = results[0]
returnDict['DataByte2'] = results[1]
returnDict['DataByte1'] = results[2]
returnDict['DataByte0'] = results[3]
bfLabels = ["Serial Reset Error Flag", "Measurement Ready Error Flag", "Ack Error Flag"]
bf = BitField( rawByte = results[4], labelPrefix = "", labelList = bfLabels )
returnDict["ErrorFlags"] = bf
return returnDict
def eAnalogIn(self, channel, idNum = None, demo=0, gain=0):
"""
Name: U12.eAnalogIn(channel, idNum = None, demo=0, gain=0)
Args: See section 4.1 of the User's Guide
Desc: This is a simplified version of AISample. Reads the voltage from 1 analog input
>>> import u12
>>> d = u12.U12()
>>> d.eAnalogIn(0)
{'overVoltage': 0, 'idnum': 1, 'voltage': 1.435546875}
"""
if idNum is None:
idNum = self.id
if ON_WINDOWS:
ljid = ctypes.c_long(idNum)
ad0 = ctypes.c_long(999)
ad1 = ctypes.c_float(999)
ecode = staticLib.EAnalogIn(ctypes.byref(ljid), demo, channel, gain, ctypes.byref(ad0), ctypes.byref(ad1))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":ljid.value, "overVoltage":ad0.value, "voltage":ad1.value}
else:
# Bits 6-4: PGA for 1st Channel
# Bits 3-0: MUX command for 1st Channel
channel0PGAMUX = ( ( gain & 7 ) << 4)
channel0PGAMUX += channel-8 if channel > 7 else channel+8
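            # For example, channel = 0 with gain = 0 gives channel0PGAMUX = 8,
            # which the low-level code treats as single-ended AI0.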
results = self.rawAISample(channel0PGAMUX = channel0PGAMUX)
return {"idnum" : self.id, "overVoltage" : int(results['PGAOvervoltage']), 'voltage' : results['Channel0']}
def eAnalogOut(self, analogOut0, analogOut1, idNum = None, demo=0):
"""
Name: U12.eAnalogOut(analogOut0, analogOut1, idNum = None, demo=0)
Args: See section 4.2 of the User's Guide
Desc: This is a simplified version of AOUpdate. Sets the voltage of both analog outputs.
>>> import u12
>>> d = u12.U12()
>>> d.eAnalogOut(2, 2)
{'idnum': 1}
"""
if idNum is None:
idNum = self.id
if ON_WINDOWS:
ljid = ctypes.c_long(idNum)
ecode = staticLib.EAnalogOut(ctypes.byref(ljid), demo, ctypes.c_float(analogOut0), ctypes.c_float(analogOut1))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":ljid.value}
else:
if analogOut0 < 0:
analogOut0 = self.pwmAVoltage
if analogOut1 < 0:
analogOut1 = self.pwmBVoltage
self.rawCounterPWMDIO(PWMA = analogOut0, PWMB = analogOut1)
self.pwmAVoltage = analogOut0
self.pwmBVoltage = analogOut1
return {"idnum": self.id}
def eCount(self, idNum = None, demo = 0, resetCounter = 0):
"""
Name: U12.eCount(idNum = None, demo = 0, resetCounter = 0)
Args: See section 4.3 of the User's Guide
Desc: This is a simplified version of Counter. Reads & resets the counter (CNT).
>>> import u12
>>> d = u12.U12()
>>> d.eCount()
{'count': 1383596032.0, 'ms': 251487257.0}
"""
# Check id num
if idNum is None:
idNum = self.id
if ON_WINDOWS:
ljid = ctypes.c_long(idNum)
count = ctypes.c_double()
ms = ctypes.c_double()
ecode = staticLib.ECount(ctypes.byref(ljid), demo, resetCounter, ctypes.byref(count), ctypes.byref(ms))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":ljid.value, "count":count.value, "ms":ms.value}
else:
results = self.rawCounter( ResetCounter = resetCounter)
return {"idnum":self.id, "count":results['Counter'], "ms": (time() * 1000)}
def eDigitalIn(self, channel, idNum = None, demo = 0, readD=0):
"""
Name: U12.eDigitalIn(channel, idNum = None, demo = 0, readD=0)
Args: See section 4.4 of the User's Guide
Desc: This is a simplified version of DigitalIO that reads the state of
one digital input. Also configures the requested pin to input and
leaves it that way.
>>> import u12
>>> d = u12.U12()
>>> d.eDigitalIn(0)
{'state': 0, 'idnum': 1}
"""
# Check id num
if idNum is None:
idNum = self.id
if ON_WINDOWS:
ljid = ctypes.c_long(idNum)
state = ctypes.c_long(999)
ecode = staticLib.EDigitalIn(ctypes.byref(ljid), demo, channel, readD, ctypes.byref(state))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":ljid.value, "state":state.value}
else:
oldstate = self.rawDIO()
if readD:
if channel > 7:
channel = channel-7
direction = BitField(rawByte = oldstate['D15toD8Directions'])
direction[7-channel] = 1
results = self.rawDIO(D15toD8Directions = direction, UpdateDigital = True)
state = results["D15toD8States"][7-channel]
else:
direction = BitField(rawByte = oldstate['D7toD0Directions'])
direction[7-channel] = 1
                    results = self.rawDIO(D7toD0Directions = direction, UpdateDigital = True)
                    state = results["D7toD0States"][7-channel]
else:
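                # IO lines: write 0xFF to the combined directions/states byte so IO0-IO3 become inputs, then read the requested line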
results = self.rawDIO(IO3toIO0DirectionsAndStates = 255, UpdateDigital = True)
state = results["IO3toIO0States"][3-channel]
return {"idnum" : self.id, "state" : state}
def eDigitalOut(self, channel, state, idNum = None, demo = 0, writeD=0):
"""
Name: U12.eDigitalOut(channel, state, idNum = None, demo = 0, writeD=0)
Args: See section 4.5 of the User's Guide
Desc: This is a simplified version of DigitalIO that sets/clears the
state of one digital output. Also configures the requested pin to
output and leaves it that way.
>>> import u12
>>> d = u12.U12()
>>> d.eDigitalOut(0, 1)
        {'idnum': 1}
"""
# Check id num
if idNum is None:
idNum = self.id
if ON_WINDOWS:
ljid = ctypes.c_long(idNum)
ecode = staticLib.EDigitalOut(ctypes.byref(ljid), demo, channel, writeD, state)
if ecode != 0: raise U12Exception(ecode)
return {"idnum":ljid.value}
else:
oldstate = self.rawDIO()
if writeD:
if channel > 7:
                    channel = channel-8
direction = BitField(rawByte = int(oldstate['D15toD8Directions']))
direction[7-channel] = 0
states = BitField(rawByte = int(oldstate['D15toD8States']))
states[7-channel] = state
                    self.rawDIO(D15toD8Directions = direction, D15toD8States = states, UpdateDigital = True)
else:
direction = BitField(rawByte = int(oldstate['D7toD0Directions']))
direction[7-channel] = 0
states = BitField(rawByte = int(oldstate['D7toD0States']))
states[7-channel] = state
self.rawDIO(D7toD0Directions = direction, D7toD0States = states, UpdateDigital = True)
else:
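                # IO lines: upper nibble of the combined byte holds the directions (0 = output), lower nibble holds the states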
bf = BitField()
bf[7-(channel+4)] = 0
bf[7-channel] = state
self.rawDIO(IO3toIO0DirectionsAndStates = bf, UpdateDigital = True)
return {"idnum" : self.id}
def aiSample(self, numChannels, channels, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0):
"""
Name: U12.aiSample(channels, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0)
Args: See section 4.6 of the User's Guide
Desc: Reads the voltages from 1,2, or 4 analog inputs. Also controls/reads the 4 IO ports.
>>> dev = U12()
>>> dev.aiSample(2, [0, 1])
{'stateIO': [0, 0, 0, 0], 'overVoltage': 0, 'idnum': 1, 'voltages': [1.4208984375, 1.4306640625]}
"""
# Check id num
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Check to make sure that everything is checked
if not isIterable(channels): raise TypeError("channels must be iterable")
if not isIterable(gains): raise TypeError("gains must be iterable")
        if len(channels) < numChannels: raise ValueError("channels must have at least numChannels elements")
        if len(gains) < numChannels: raise ValueError("gains must have at least numChannels elements")
# Convert lists to arrays and create other ctypes
channelsArray = listToCArray(channels, ctypes.c_long)
gainsArray = listToCArray(gains, ctypes.c_long)
overVoltage = ctypes.c_long(999)
longArrayType = (ctypes.c_long * 4)
floatArrayType = (ctypes.c_float * 4)
voltages = floatArrayType(0, 0, 0, 0)
stateIOin = ctypes.c_long(stateIOin)
ecode = staticLib.AISample(ctypes.byref(idNum), demo, ctypes.byref(stateIOin), updateIO, ledOn, numChannels, ctypes.byref(channelsArray), ctypes.byref(gainsArray), disableCal, ctypes.byref(overVoltage), ctypes.byref(voltages))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value, "stateIO":stateIOin.value, "overVoltage":overVoltage.value, "voltages":voltages[0:numChannels]}
def aiBurst(self, numChannels, channels, scanRate, numScans, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, triggerIO=0, triggerState=0, timeout=1, transferMode=0):
"""
Name: U12.aiBurst(numChannels, channels, scanRate, numScans, idNum=None, demo=0, stateIOin=[0, 0, 0, 0], updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, triggerIO=0, triggerState=0, timeout=1, transferMode=0)
Args: See section 4.7 of the User's Guide
Desc: Reads a specified number of scans (up to 4096) at a specified scan rate (up to 8192 Hz) from 1,2, or 4 analog inputs
>>> dev = U12()
>>> dev.aiBurst(1, [0], 400, 10)
{'overVoltage': 0, 'scanRate': 400.0, 'stateIOout': <u12.c_long_Array_4096 object at 0x00DB4BC0>, 'idnum': 1, 'voltages': <u12.c_float_Array_4096_Array_4 object at 0x00DB4B70>}
"""
# Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# check list sizes
        if len(channels) < numChannels: raise ValueError("channels must have at least numChannels elements")
        if len(gains) < numChannels: raise ValueError("gains must have at least numChannels elements")
# Convert lists to arrays and create other ctypes
channelsArray = listToCArray(channels, ctypes.c_long)
gainsArray = listToCArray(gains, ctypes.c_long)
scanRate = ctypes.c_float(scanRate)
pointerArray = (ctypes.c_void_p * 4)
arr4096_type = ctypes.c_float * 4096
voltages_type = arr4096_type * 4
voltages = voltages_type()
stateIOout = (ctypes.c_long * 4096)()
overVoltage = ctypes.c_long(999)
ecode = staticLib.AIBurst(ctypes.byref(idNum), demo, stateIOin, updateIO, ledOn, numChannels, ctypes.byref(channelsArray), ctypes.byref(gainsArray), ctypes.byref(scanRate), disableCal, triggerIO, triggerState, numScans, timeout, ctypes.byref(voltages), ctypes.byref(stateIOout), ctypes.byref(overVoltage), transferMode)
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value, "scanRate":scanRate.value, "voltages":voltages, "stateIOout":stateIOout, "overVoltage":overVoltage.value}
def aiStreamStart(self, numChannels, channels, scanRate, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, readCount=0):
"""
Name: U12.aiStreamStart(numChannels, channels, scanRate, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, readCount=0)
Args: See section 4.8 of the User's Guide
Desc: Starts a hardware timed continuous acquisition
>>> dev = U12()
>>> dev.aiStreamStart(1, [0], 200)
{'scanRate': 200.0, 'idnum': 1}
"""
# Configure return type
staticLib.AIStreamStart.restype = ctypes.c_long
# check list sizes
        if len(channels) < numChannels: raise ValueError("channels must have at least numChannels elements")
        if len(gains) < numChannels: raise ValueError("gains must have at least numChannels elements")
#if len(stateIOin) < 4: raise ValueError("stateIOin must have atleast 4 elements")
# Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Convert lists to arrays and create other ctypes
channelsArray = listToCArray(channels, ctypes.c_long)
gainsArray = listToCArray(gains, ctypes.c_long)
scanRate = ctypes.c_float(scanRate)
ecode = staticLib.AIStreamStart(ctypes.byref(idNum), demo, stateIOin, updateIO, ledOn, numChannels, ctypes.byref(channelsArray), ctypes.byref(gainsArray), ctypes.byref(scanRate), disableCal, 0, readCount)
if ecode != 0: raise U12Exception(ecode) # TODO: Switch this out for exception
# The ID number must be saved for AIStream
self.id = idNum.value
self.streaming = True
return {"idnum":idNum.value, "scanRate":scanRate.value}
def aiStreamRead(self, numScans, localID=None, timeout=1):
"""
Name: U12.aiStreamRead(numScans, localID=None, timeout=1)
Args: See section 4.9 of the User's Guide
Desc: Waits for a specified number of scans to be available and reads them.
>>> dev = U12()
>>> dev.aiStreamStart(1, [0], 200)
>>> dev.aiStreamRead(10)
{'overVoltage': 0, 'ljScanBacklog': 0, 'stateIOout': <u12.c_long_Array_4096 object at 0x00DF4AD0>, 'reserved': 0, 'voltages': <u12.c_float_Array_4096_Array_4 object at 0x00DF4B20>}
"""
# Check to make sure that we are streaming
if not self.streaming:
raise U12Exception(-1, "Streaming has not started")
# Check id number
if localID is None:
localID = self.id
# Create arrays and other ctypes
arr4096_type = ctypes.c_float * 4096
voltages_type = arr4096_type * 4
voltages = voltages_type()
stateIOout = (ctypes.c_long * 4096)()
reserved = ctypes.c_long(0)
ljScanBacklog = ctypes.c_long(99999)
overVoltage = ctypes.c_long(999)
ecode = staticLib.AIStreamRead(localID, numScans, timeout, ctypes.byref(voltages), ctypes.byref(stateIOout), ctypes.byref(reserved), ctypes.byref(ljScanBacklog), ctypes.byref(overVoltage))
if ecode != 0: raise U12Exception(ecode) # TODO: Switch this out for exception
return {"voltages":voltages, "stateIOout":stateIOout, "reserved":reserved.value, "ljScanBacklog":ljScanBacklog.value, "overVoltage":overVoltage.value}
def aiStreamClear(self, localID=None):
"""
        Name: U12.aiStreamClear(localID=None)
Args: See section 4.10 of the User's Guide
Desc: This function stops the continuous acquisition. It should be called once when finished with the stream.
>>> dev = U12()
>>> dev.aiStreamStart(1, [0], 200)
>>> dev.aiStreamRead(10)
>>> dev.aiStreamClear()
"""
# Check to make sure that we are streaming
if not self.streaming:
raise U12Exception(-1, "Streaming has not started")
# Check id number
if localID is None:
localID = self.id
ecode = staticLib.AIStreamClear(localID)
if ecode != 0: raise U12Exception(ecode) # TODO: Switch this out for exception
def aoUpdate(self, idNum=None, demo=0, trisD=None, trisIO=None, stateD=None, stateIO=None, updateDigital=0, resetCounter=0, analogOut0=0, analogOut1=0):
"""
        Name: U12.aoUpdate(idNum=None, demo=0, trisD=None, trisIO=None, stateD=None, stateIO=None, updateDigital=0, resetCounter=0, analogOut0=0, analogOut1=0)
Args: See section 4.11 of the User's Guide
Desc: Sets the voltages of the analog outputs. Also controls/reads all 20 digital I/O and the counter.
>>> dev = U12()
>>> dev.aoUpdate()
>>> {'count': 2, 'stateIO': 3, 'idnum': 1, 'stateD': 0}
"""
# Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Check tris and state arguments
if updateDigital > 0:
if trisD is None: raise ValueError("keyword argument trisD must be set")
if trisIO is None: raise ValueError("keyword argument trisIO must be set")
if stateD is None: raise ValueError("keyword argument stateD must be set")
if stateIO is None: raise ValueError("keyword argument stateIO must be set")
# Create ctypes
if stateD is None: stateD = ctypes.c_long(0)
else: stateD = ctypes.c_long(stateD)
if stateIO is None: stateIO = ctypes.c_long(0)
else: stateIO = ctypes.c_long(stateIO)
count = ctypes.c_ushort(999)
# Create arrays and other ctypes
ecode = staticLib.AOUpdate(ctypes.byref(idNum), demo, trisD, trisIO, ctypes.byref(stateD), ctypes.byref(stateIO), updateDigital, resetCounter, ctypes.byref(count), ctypes.c_float(analogOut0), ctypes.c_float(analogOut1))
if ecode != 0: raise U12Exception(ecode) # TODO: Switch this out for exception
return {"idnum":idNum.value, "stateD":stateD.value, "stateIO":stateIO.value, "count":count.value}
def asynchConfig(self, fullA, fullB, fullC, halfA, halfB, halfC, idNum=None, demo=None, timeoutMult=1, configA=0, configB=0, configTE=0):
"""
Name: U12.asynchConfig(fullA, fullB, fullC, halfA, halfB, halfC, idNum=None, demo=None, timeoutMult=1, configA=0, configB=0, configTE=0)
Args: See section 4.12 of the User's Guide
Desc: Requires firmware V1.1 or higher. This function writes to the asynch registers and sets the direction of the D lines (input/output) as needed.
>>> dev = U12()
>>> dev.asynchConfig(96,1,1,22,2,1)
>>> {'idNum': 1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.AsynchConfig(ctypes.byref(idNum), demo, timeoutMult, configA, configB, configTE, fullA, fullB, fullC, halfA, halfB, halfC)
if ecode != 0: raise U12Exception(ecode) # TODO: Switch this out for exception
return {"idNum":idNum.value}
def asynch(self, baudrate, data, idNum=None, demo=0, portB=0, enableTE=0, enableTO=0, enableDel=0, numWrite=0, numRead=0):
"""
        Name: U12.asynch(baudrate, data, idNum=None, demo=0, portB=0, enableTE=0, enableTO=0, enableDel=0, numWrite=0, numRead=0)
Args: See section 4.13 of the User's Guide
        Desc: Requires firmware V1.1 or higher. Writes and/or reads data over the asynchronous connection configured with asynchConfig.
>>> dev = U12()
        >>> dev.asynchConfig(96,1,1,22,2,1)
>>> dev.asynch(19200, [0, 0])
        >>> {'data': <u12.c_long_Array_18 object at 0x00DEFB70>, 'idnum': 1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Check size of data
if len(data) > 18: raise ValueError("data can not be larger than 18 elements")
# Make data 18 elements large
dataArray = [0] * 18
for i in range(0, len(data)):
dataArray[i] = data[i]
print dataArray
dataArray = listToCArray(dataArray, ctypes.c_long)
ecode = staticLib.Asynch(ctypes.byref(idNum), demo, portB, enableTE, enableTO, enableDel, baudrate, numWrite, numRead, ctypes.byref(dataArray))
if ecode != 0: raise U12Exception(ecode) # TODO: Switch this out for exception
return {"idnum":long, "data":dataArray}
GainMapping = [ 1.0, 2.0, 4.0, 5.0, 8.0, 10.0, 16.0, 20.0 ]
def bitsToVolts(self, chnum, chgain, bits):
"""
Name: U12.bitsToVolts(chnum, chgain, bits)
Args: See section 4.14 of the User's Guide
Desc: Converts a 12-bit (0-4095) binary value into a LabJack voltage. No hardware communication is involved.
>>> dev = U12()
>>> dev.bitsToVolts(0, 0, 2662)
>>> {'volts': 2.998046875}
"""
if ON_WINDOWS:
volts = ctypes.c_float()
ecode = staticLib.BitsToVolts(chnum, chgain, bits, ctypes.byref(volts))
if ecode != 0: print ecode
return volts.value
else:
if chnum < 8:
return ( float(bits) * 20.0 / 4096.0 ) - 10.0
else:
volts = ( float(bits) * 40.0 / 4096.0 ) - 20.0
return volts / self.GainMapping[chgain]
def voltsToBits(self, chnum, chgain, volts):
"""
Name: U12.voltsToBits(chnum, chgain, bits)
Args: See section 4.15 of the User's Guide
        Desc: Converts a voltage to its 12-bit (0-4095) binary representation. No hardware communication is involved.
>>> dev = U12()
>>> dev.voltsToBits(0, 0, 3)
>>> {'bits': 2662}
"""
if ON_WINDOWS:
bits = ctypes.c_long(999)
ecode = staticLib.VoltsToBits(chnum, chgain, ctypes.c_float(volts), ctypes.byref(bits))
if ecode != 0: raise U12Exception(ecode)
return bits.value
        else:
            # Invert the bitsToVolts() maths above and clamp to the 12-bit range
            fullspan = 20.0 if chnum < 8 else 40.0
            v = volts if chnum < 8 else volts * self.GainMapping[chgain]
            return max(0, min(4095, int(round((v + fullspan / 2.0) / (fullspan / 4096.0)))))
def counter(self, idNum=None, demo=0, resetCounter=0, enableSTB=1):
"""
Name: U12.counter(idNum=None, demo=0, resetCounter=0, enableSTB=1)
        Args: See section 4.16 of the User's Guide
        Desc: Controls and reads the counter (CNT). Also returns the states of the D and IO lines.
        >>> dev = U12()
        >>> dev.counter()
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Create ctypes
stateD = ctypes.c_long(999)
stateIO = ctypes.c_long(999)
count = ctypes.c_ulong(999)
print idNum
ecode = staticLib.Counter(ctypes.byref(idNum), demo, ctypes.byref(stateD), ctypes.byref(stateIO), resetCounter, enableSTB, ctypes.byref(count))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value, "stateD": stateD.value, "stateIO":stateIO.value, "count":count.value}
def digitalIO(self, idNum=None, demo=0, trisD=None, trisIO=None, stateD=None, stateIO=None, updateDigital=0):
"""
Name: U12.digitalIO(idNum=None, demo=0, trisD=None, trisIO=None, stateD=None, stateIO=None, updateDigital=0)
Args: See section 4.17 of the User's Guide
Desc: Reads and writes to all 20 digital I/O.
>>> dev = U12()
>>> dev.digitalIO()
>>> {'stateIO': 0, 'stateD': 0, 'idnum': 1, 'outputD': 0, 'trisD': 0}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Check tris and state parameters
if updateDigital > 0:
if trisD is None: raise ValueError("keyword argument trisD must be set")
if trisIO is None: raise ValueError("keyword argument trisIO must be set")
if stateD is None: raise ValueError("keyword argument stateD must be set")
if stateIO is None: raise ValueError("keyword argument stateIO must be set")
# Create ctypes
if trisD is None: trisD = ctypes.c_long(999)
else:trisD = ctypes.c_long(trisD)
if stateD is None:stateD = ctypes.c_long(999)
else: stateD = ctypes.c_long(stateD)
if stateIO is None: stateIO = ctypes.c_long(0)
else: stateIO = ctypes.c_long(stateIO)
outputD = ctypes.c_long(999)
# Check trisIO
if trisIO is None: trisIO = 0
ecode = staticLib.DigitalIO(ctypes.byref(idNum), demo, ctypes.byref(trisD), trisIO, ctypes.byref(stateD), ctypes.byref(stateIO), updateDigital, ctypes.byref(outputD))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value, "trisD":trisD.value, "stateD":stateD.value, "stateIO":stateIO.value, "outputD":outputD.value}
def getDriverVersion(self):
"""
Name: U12.getDriverVersion()
Args: See section 4.18 of the User's Guide
Desc: Returns the version number of ljackuw.dll. No hardware communication is involved.
>>> dev = U12()
>>> dev.getDriverVersion()
>>> 1.21000003815
"""
staticLib.GetDriverVersion.restype = ctypes.c_float
return staticLib.GetDriverVersion()
def getFirmwareVersion(self, idNum=None):
"""
        Name: U12.getFirmwareVersion(idNum=None)
Args: See section 4.20 of the User's Guide
Desc: Retrieves the firmware version from the LabJack's processor
>>> dev = U12()
>>> dev.getFirmwareVersion()
"""
# Check ID number
if idNum is None: idNum = self.id
idNum = ctypes.c_long(idNum)
staticLib.GetFirmwareVersion.restype = ctypes.c_float
firmware = staticLib.GetFirmwareVersion(ctypes.byref(idNum))
if firmware > 512: raise U12Exception(firmware-512)
return {"idnum" : idNum.value, "firmware" : firmware}
def getWinVersion(self):
"""
        Name: U12.getWinVersion()
Args: See section 4.21 of the User's Guide
Desc: Uses a Windows API function to get the OS version
>>> dev = U12()
>>> dev.getWinVersion()
>>> {'majorVersion': 5L, 'minorVersion': 1L, 'platformID': 2L, 'buildNumber': 2600L, 'servicePackMajor': 2L, 'servicePackMinor': 0L}
"""
# Create ctypes
majorVersion = ctypes.c_ulong()
minorVersion = ctypes.c_ulong()
buildNumber = ctypes.c_ulong()
platformID = ctypes.c_ulong()
servicePackMajor = ctypes.c_ulong()
servicePackMinor = ctypes.c_ulong()
ecode = staticLib.GetWinVersion(ctypes.byref(majorVersion), ctypes.byref(minorVersion), ctypes.byref(buildNumber), ctypes.byref(platformID), ctypes.byref(servicePackMajor), ctypes.byref(servicePackMinor))
if ecode != 0: raise U12Exception(ecode)
return {"majorVersion":majorVersion.value, "minorVersion":minorVersion.value, "buildNumber":buildNumber.value, "platformID":platformID.value, "servicePackMajor":servicePackMajor.value, "servicePackMinor":servicePackMinor.value}
def listAll(self):
"""
Name: U12.listAll()
Args: See section 4.22 of the User's Guide
Desc: Searches the USB for all LabJacks, and returns the serial number and local ID for each
>>> dev = U12()
>>> dev.listAll()
>>> {'serialnumList': <u12.c_long_Array_127 object at 0x00E2AD50>, 'numberFound': 1, 'localIDList': <u12.c_long_Array_127 object at 0x00E2ADA0>}
"""
# Create arrays and ctypes
productIDList = listToCArray([0]*127, ctypes.c_long)
serialnumList = listToCArray([0]*127, ctypes.c_long)
localIDList = listToCArray([0]*127, ctypes.c_long)
powerList = listToCArray([0]*127, ctypes.c_long)
arr127_type = ctypes.c_long * 127
calMatrix_type = arr127_type * 20
calMatrix = calMatrix_type()
reserved = ctypes.c_long()
numberFound = ctypes.c_long()
ecode = staticLib.ListAll(ctypes.byref(productIDList), ctypes.byref(serialnumList), ctypes.byref(localIDList), ctypes.byref(powerList), ctypes.byref(calMatrix), ctypes.byref(numberFound), ctypes.byref(reserved), ctypes.byref(reserved))
if ecode != 0: raise U12Exception(ecode)
return {"serialnumList": serialnumList, "localIDList":localIDList, "numberFound":numberFound.value}
def localID(self, localID, idNum=None):
"""
Name: U12.localID(localID, idNum=None)
Args: See section 4.23 of the User's Guide
Desc: Changes the local ID of a specified LabJack
>>> dev = U12()
>>> dev.localID(1)
>>> {'idnum':1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.LocalID(ctypes.byref(idNum), localID)
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def noThread(self, noThread, idNum=None):
"""
        Name: U12.noThread(noThread, idNum=None)
Args: See section 4.24 of the User's Guide
Desc: This function is needed when interfacing TestPoint to the LabJack DLL on Windows 98/ME
>>> dev = U12()
>>> dev.noThread(1)
>>> {'idnum':1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.NoThread(ctypes.byref(idNum), noThread)
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def pulseOut(self, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0):
"""
Name: U12.pulseOut(bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0)
Args: See section 4.25 of the User's Guide
Desc: This command creates pulses on any/all of D0-D7
>>> dev = U12()
>>> dev.pulseOut(0, 1, 1, 1, 1, 1)
>>> {'idnum':1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.PulseOut(ctypes.byref(idNum), demo, lowFirst, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2)
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def pulseOutStart(self, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0):
"""
Name: U12.pulseOutStart(bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0)
Args: See section 4.26 of the User's Guide
Desc: PulseOutStart and PulseOutFinish are used as an alternative to PulseOut (See PulseOut for more information)
>>> dev = U12()
>>> dev.pulseOutStart(0, 1, 1, 1, 1, 1)
>>> {'idnum':1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.PulseOutStart(ctypes.byref(idNum), demo, lowFirst, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2)
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def pulseOutFinish(self, timeoutMS, idNum=None, demo=0):
"""
Name: U12.pulseOutFinish(timeoutMS, idNum=None, demo=0)
Args: See section 4.27 of the User's Guide
Desc: See PulseOutStart for more information
>>> dev = U12()
>>> dev.pulseOutStart(0, 1, 1, 1, 1, 1)
>>> dev.pulseOutFinish(100)
>>> {'idnum':1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.PulseOutFinish(ctypes.byref(idNum), demo, timeoutMS)
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def pulseOutCalc(self, frequency):
"""
        Name: U12.pulseOutCalc(frequency)
Args: See section 4.28 of the User's Guide
Desc: This function can be used to calculate the cycle times for PulseOut or PulseOutStart.
>>> dev = U12()
>>> dev.pulseOutCalc(100)
>>> {'frequency': 100.07672882080078, 'timeB': 247, 'timeC': 1}
"""
# Create ctypes
frequency = ctypes.c_float(frequency)
timeB = ctypes.c_long(0)
timeC = ctypes.c_long(0)
ecode = staticLib.PulseOutCalc(ctypes.byref(frequency), ctypes.byref(timeB), ctypes.byref(timeC))
if ecode != 0: raise U12Exception(ecode)
return {"frequency":frequency.value, "timeB":timeB.value, "timeC":timeC.value}
def reEnum(self, idNum=None):
"""
Name: U12.reEnum(idNum=None)
Args: See section 4.29 of the User's Guide
Desc: Causes the LabJack to electrically detach from and re-attach to the USB so it will re-enumerate
>>> dev = U12()
>>> dev.reEnum()
>>> {'idnum': 1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.ReEnum(ctypes.byref(idNum))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def reset(self, idNum=None):
"""
Name: U12.reset(idNum=None)
Args: See section 4.30 of the User's Guide
Desc: Causes the LabJack to reset after about 2 seconds
>>> dev = U12()
>>> dev.reset()
>>> {'idnum': 1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
ecode = staticLib.Reset(ctypes.byref(idNum))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def resetLJ(self, idNum=None):
"""
Name: U12.resetLJ(idNum=None)
Args: See section 4.30 of the User's Guide
Desc: Causes the LabJack to reset after about 2 seconds
>>> dev = U12()
>>> dev.resetLJ()
>>> {'idnum': 1}
"""
        return self.reset(idNum)
def sht1X(self, idNum=None, demo=0, softComm=0, mode=0, statusReg=0):
"""
Name: U12.sht1X(idNum=None, demo=0, softComm=0, mode=0, statusReg=0)
Args: See section 4.31 of the User's Guide
Desc: This function retrieves temperature and/or humidity readings from an SHT1X sensor.
>>> dev = U12()
>>> dev.sht1X()
>>> {'tempC': 24.69999885559082, 'rh': 39.724445343017578, 'idnum': 1, 'tempF': 76.459999084472656}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Create ctypes
tempC = ctypes.c_float(0)
tempF = ctypes.c_float(0)
rh = ctypes.c_float(0)
ecode = staticLib.SHT1X(ctypes.byref(idNum), demo, softComm, mode, statusReg, ctypes.byref(tempC), ctypes.byref(tempF), ctypes.byref(rh))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value, "tempC":tempC.value, "tempF":tempF.value, "rh":rh.value}
def shtComm(self, numWrite, numRead, datatx, idNum=None, softComm=0, waitMeas=0, serialReset=0, dataRate=0):
"""
Name: U12.shtComm(numWrite, numRead, datatx, idNum=None, softComm=0, waitMeas=0, serialReset=0, dataRate=0)
Args: See section 4.32 of the User's Guide
Desc: Low-level public function to send and receive up to 4 bytes to from an SHT1X sensor
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
# Check size of datatx
if len(datatx) != 4: raise ValueError("datatx must have exactly 4 elements")
# Create ctypes
datatx = listToCArray(datatx, ctypes.c_ubyte)
datarx = (ctypes.c_ubyte * 4)((0) * 4)
ecode = staticLib.SHTComm(ctypes.byref(idNum), softComm, waitMeas, serialReset, dataRate, numWrite, numRead, ctypes.byref(datatx), ctypes.byref(datarx))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value, "datarx":datarx}
def shtCRC(self, numWrite, numRead, datatx, datarx, statusReg=0):
"""
Name: U12.shtCRC(numWrite, numRead, datatx, datarx, statusReg=0)
Args: See section 4.33 of the User's Guide
Desc: Checks the CRC on an SHT1X communication
"""
# Create ctypes
datatx = listToCArray(datatx, ctypes.c_ubyte)
datarx = listToCArray(datarx, ctypes.c_ubyte)
return staticLib.SHTCRC(statusReg, numWrite, numRead, ctypes.byref(datatx), ctypes.byref(datarx))
def synch(self, mode, numWriteRead, data, idNum=None, demo=0, msDelay=0, husDelay=0, controlCS=0, csLine=None, csState=0, configD=0):
"""
Name: U12.synch(mode, numWriteRead, data, idNum=None, demo=0, msDelay=0, husDelay=0, controlCS=0, csLine=None, csState=0, configD=0)
        Args: See section 4.34 of the User's Guide
        Desc: Performs SPI-style synchronous communication with an external device using the D lines.
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
if controlCS > 0 and csLine is None: raise ValueError("csLine must be specified")
# Make sure data is 18 elements
cData = [0] * 18
for i in range(0, len(data)):
cData[i] = data[i]
cData = listToCArray(cData, ctypes.c_long)
ecode = staticLib.Synch(ctypes.byref(idNum), demo, mode, msDelay, husDelay, controlCS, csLine, csState, configD, numWriteRead, ctypes.byref(cData))
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value, "data":cData}
def watchdog(self, active, timeout, activeDn, stateDn, idNum=None, demo=0, reset=0):
"""
Name: U12.watchdog(active, timeout, activeDn, stateDn, idNum=None, demo=0, reset=0)
Args: See section 4.35 of the User's Guide
Desc: Controls the LabJack watchdog function.
>>> dev = U12()
>>> dev.watchdog(1, 1, [0, 0, 0], [0, 0, 0])
>>> {'idnum': 1}
"""
#Check id number
if idNum is None:
idNum = self.id
idNum = ctypes.c_long(idNum)
        if len(activeDn) != 3: raise ValueError("activeDn must have 3 elements")
        if len(stateDn) != 3: raise ValueError("stateDn must have 3 elements")
ecode = staticLib.Watchdog(ctypes.byref(idNum), demo, active, timeout, reset, activeDn[0], activeDn[1], activeDn[2], stateDn[0], stateDn[1], stateDn[2])
if ecode != 0: raise U12Exception(ecode)
return {"idnum":idNum.value}
def readMem(self, address, idnum = None):
"""
Name: U12.readMem(address, idnum=None)
Args: See section 4.36 of the User's Guide
Desc: Reads 4 bytes from a specified address in the LabJack's nonvolatile memory
>>> dev = U12()
>>> dev.readMem(0)
>>> [5, 246, 16, 59]
"""
if address is None:
raise Exception, "Must give an Address."
if idnum is None:
idnum = self.id
ljid = ctypes.c_ulong(idnum)
ad0 = ctypes.c_ulong()
ad1 = ctypes.c_ulong()
ad2 = ctypes.c_ulong()
ad3 = ctypes.c_ulong()
ec = staticLib.ReadMem(ctypes.byref(ljid), ctypes.c_long(address), ctypes.byref(ad3), ctypes.byref(ad2), ctypes.byref(ad1), ctypes.byref(ad0))
if ec != 0: raise U12Exception(ec)
addr = [0] * 4
addr[0] = int(ad3.value & 0xff)
addr[1] = int(ad2.value & 0xff)
addr[2] = int(ad1.value & 0xff)
addr[3] = int(ad0.value & 0xff)
return addr
def writeMem(self, address, data, idnum=None, unlocked=False):
"""
Name: U12.writeMem(self, address, data, idnum=None, unlocked=False)
Args: See section 4.37 of the User's Guide
Desc: Writes 4 bytes to the LabJack's 8,192 byte nonvolatile memory at a specified address.
>>> dev = U12()
>>> dev.writeMem(0, [5, 246, 16, 59])
>>> 1
"""
if address is None or data is None:
raise Exception, "Must give both an Address and data."
if type(data) is not list or len(data) != 4:
raise Exception, "Data must be a list and have a length of 4"
if idnum is None:
idnum = self.id
ljid = ctypes.c_ulong(idnum)
ec = staticLib.WriteMem(ctypes.byref(ljid), int(unlocked), address, data[3] & 0xff, data[2] & 0xff, data[1] & 0xff, data[0] & 0xff)
if ec != 0: raise U12Exception(ec)
return ljid.value
def LJHash(self, hashStr, size):
outBuff = (ctypes.c_char * 16)()
retBuff = ''
staticLib = ctypes.windll.LoadLibrary("ljackuw")
ec = staticLib.LJHash(ctypes.cast(hashStr, ctypes.POINTER(ctypes.c_char)),
size,
ctypes.cast(outBuff, ctypes.POINTER(ctypes.c_char)),
0)
if ec != 0: raise U12Exception(ec)
for i in range(16):
retBuff += outBuff[i]
return retBuff
def isIterable(var):
try:
iter(var)
return True
except:
return False
def listToCArray(list, dataType):
arrayType = dataType * len(list)
array = arrayType()
for i in range(0,len(list)):
array[i] = list[i]
return array
def cArrayToList(array):
list = []
for item in array:
list.append(item)
return list
def getErrorString(errorcode):
"""
Name: U12.getErrorString(errorcode)
Args: See section 4.19 of the User's Guide
Desc: Converts a LabJack errorcode, returned by another function, into a string describing the error. No hardware communication is involved.
>>> dev = U12()
>>> dev.getErrorString(1)
    >>> Unknown error
"""
errorString = ctypes.c_char_p(" "*50)
staticLib.GetErrorString(errorcode, errorString)
return errorString.value
def hexWithoutQuotes(l):
""" Return a string listing hex without all the single quotes.
>>> l = range(10)
>>> print hexWithoutQuotes(l)
[0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9]
"""
return str([hex (i) for i in l]).replace("'", "")
| gpl-2.0 | 4,888,426,049,671,642,000 | 37.468813 | 327 | 0.540318 | false |
osonwanne/virgilglobal | portfolio/models.py | 1 | 1641 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
from modelcluster.fields import ParentalKey
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
class PortfolioIndexPage(Page):
intro = RichTextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel('intro', classname="full")
]
class PortfolioPage(Page):
date = models.DateField("Post date")
intro = models.CharField(max_length=250)
body = RichTextField(blank=True)
def main_image(self):
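        # Return the image of the first gallery item, or None if this page has no gallery images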
gallery_item = self.gallery_images.first()
if gallery_item:
return gallery_item.image
else:
return None
search_fields = Page.search_fields + [
index.SearchField('intro'),
index.SearchField('body'),
]
content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('intro'),
FieldPanel('body', classname="full"),
InlinePanel('gallery_images', label="Gallery images"),
]
class PortfolioPageGalleryImage(Orderable):
page = ParentalKey(PortfolioPage, related_name='gallery_images')
image = models.ForeignKey(
'wagtailimages.Image', on_delete=models.CASCADE, related_name='+'
)
caption = models.CharField(blank=True, max_length=250)
panels = [
ImageChooserPanel('image'),
FieldPanel('caption'),
] | gpl-3.0 | -8,844,760,692,456,477,000 | 27.310345 | 73 | 0.687386 | false |
brownrout/EECS-337-Recipes | Team4/chinese_transformations.py | 1 | 3471 | chinese_spicy_list = {
'jalapeno' : ['curry paste', 'chili sauce', 'curry paste'],
'wasabi': ['curry paste', 'chili sauce', 'curry paste'],
'tobasco': ['curry paste', 'chili sauce', 'curry paste'],
'hot sauce': ['curry paste', 'chili sauce', 'curry paste'],
'habanero pepper' : ['curry paste', 'chili sauce', 'curry paste'],
}
chinese_spicy_stopwords = ['paste', 'sauce', 'ground', 'canadian-style', 'canadian']
chinese_spices = [
'soy sauce powder',
'five spice',
'ground ginger',
'ground fennel seed',
'ground canelia',
'ground star anise',
'szechuan peppercorns',
'cracked fennel seed',
'whole fennel seed',
'ginger root',
'lemongrass',
'tumeric',
'cloves',
'hot mustard',
'birds eye chili pepper',
'cinnamon sticks',
'kaffir lime leaves'
]
chinese_cheeses = {
'mozzerella': 'rushan',
'cheddar' : 'rushan',
'anejo enchilado' :'rushan',
'asiago':'rushan',
'blue': 'rushan',
'cotija' : 'rushan',
'gruyere' : 'rushan',
'parmesan': 'rushan',
'romano' : 'rushan',
'gouda' : 'rushan',
'raclette': 'rushan',
'swiss': 'rushan',
'cheddar' : 'rushan',
'chevres' : 'rushan',
'edam' : 'rushan',
'monterey jack': 'rushan',
'provolone' : 'rushan',
'butterkase' : 'rushan',
'colby': 'rushan',
'fontina val daosta':'rushan',
'havarti': 'rushan',
'brie' : 'rushan',
'camembert de normandie' :'rushan',
'gorgonzola' : 'rushan',
'limburger' : 'rushan',
'feta': 'rushan',
'munster' : 'rushan',
'neufchatel' : 'rushan',
'queso' : 'rushan',
'burrata' : 'rushan',
'curd' : 'rushan',
'chechil' : 'rushan',
'chura kampo' : 'rushan',
'american cheese' : 'rushan',
'roquefort': 'rushan',
'camembert': 'rushan',
'cotija': 'rushan',
'chevre': 'rushan',
'emmental': 'rushan',
'taleggio': 'rushan',
'parmigiano-reggiano': 'rushan',
'manchego': 'rushan',
'cream cheese' : 'rubing'
}
chinese_sauce_list = {
'yogurt' : 'nai lao',
'gravy' : 'chinese brown gravy'
}
chinese_sauces = [
'soy sauce',
'light soy sauce',
'dark soy sauce',
'mushroom-flavored dark soy sauce',
'seasoned soy sauce',
'kikkoman soy sauce',
'tamari',
'sesame oil',
'rice wine'
]
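# Western proteins mapped to a pair of candidate Chinese-style substitutes
# (presumably the transformation code that consumes this table picks one of the two).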
chinese_meats = {
'steak': ['duck', 'chicken'],
'beef': ['duck', 'chicken'],
'sausage':['duck', 'chicken'],
'tuna' : ['shrimp', 'oyster'],
'anchovies' : ['shrimp', 'salmon'],
'pork' : ['chicken', 'duck'],
'eel' : ['chicken', 'duck'],
'tofu' : ['seitan', 'seitan'],
'bacon' : ['chicken', 'chicken']
}
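# Ingredient and equipment swaps: vegetables, fruit, pastas (mapped to rice/noodles)
# and Western cookware such as a saucepan/pan (mapped to a wok).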
chinese_vegetables_list = {
'okra' : 'bok choy',
'asparagus' : 'bok choy',
'squash' : 'choy sum',
'corn' : 'baby corn',
'refried beans' : 'green beans',
'chick peas' : 'peking chick peas',
'butternut squash' : 'oriental squash',
'lettuce' : 'celtuce',
'broccoli' : 'kai-lan',
'blueberries' : 'chinese blueberries',
'watermelon' : 'wax gourd',
'arugula' : 'chinese cabbage',
'olives' : 'chinese olives',
'spaghetti squash' : 'string beans',
'penne pasta' : 'noodles',
'spaghetti' :'noodles',
'elbow macaroni' : 'rice',
'farfalle pasta': 'rice',
'macaroni pasta': 'rice',
'macaroni': 'rice',
'elbow pasta' : 'rice',
'pasta' :'rice',
'saucepan': 'wok',
'pan' : 'wok',
'olive oil': 'peanut oil',
}
| mit | -1,479,469,051,572,116,500 | 23.443662 | 84 | 0.551426 | false |
wrightflyer/test | avrread.py | 1 | 27950 | #########################################################################################################
# AVRRead #
# A utility to read the XML files (probably .atdf in fact) in a Studio 6/7 installation and use #
# the data within to define one big structure to overlay the SFR area of any AVR. Just use: #
# #
# python avrread.py -i ATmgea16.atdf #
# #
# or any other .atdf to generate an ATmega16.h (or whatever) header file for use in programming the   #
# AVR. In this case don't use "#include <avr/io.h>" but instead just do something like this: #
# #
# #include "ATmega16.h" #
# #
# USE_SFRS(pSFR); #
# #
# int main(void) #
# { #
# pSFR->_DDRB.byte = 0xFF; #
# pSFR->_UCSRB.bit._TXEN = 1; #
# while (1) #
# { #
# pSFR->_PORTB.byte ^= (1 << 3); #
# } #
# } #
# #
# Use the USE_SFRS() macro to name a struct pointer variable (like "pSFR") that you then want to     #
# use to access the registers in the code. #
# #
# by Cliff Lawson #
# #
# Licence: I need beer - see what you can do! #
#########################################################################################################
# following ("from...") line is useful if you are trying to make Python 3 code run in Python 2
# (however things like "argparse" here means this program is 3.2+ anyway.
# from __future__ import print_function
import sys
import os
import argparse
import xml.etree.ElementTree as et
from collections import namedtuple
# found this on StackOverflow - it simply returns the lowest bit that is set in a byte
def lowestSet(int_type):
low = (int_type & -int_type)
lowBit = -1
while (low):
low >>= 1
lowBit += 1
return(lowBit)
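# e.g. lowestSet(0x78) returns 3: 0x78 is 0b01111000, whose lowest set bit is bit 3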
# often find this useful to check the raw command line args
# for n in range(0, len(sys.argv)):
# print(n, sys.argv[n])
# I like argparse - it makes for very clean command line interfaces
parser = argparse.ArgumentParser(description='Read Atmel XML (version 1.3)')
parser.add_argument("-i", "--input", dest='in_fname', help="name of .XML file to read as input")
parser.add_argument("-o", "--output", dest='out_name', help="Name of output file (overides default)")
parser.add_argument("-q", "--quiet", dest='quiet', action="store_true", help="Don't print to console")
parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", help="Show developer info")
parser.add_argument("-a", "--anonymous", dest='anon', action="store_true", help="Use anonymous structs (GCC only)")
parser.add_argument("-n", "--no_union", dest='nounion', action="store_true", help="No unions - just one entry per register")
parser.add_argument("-m", "--multiple", dest='multiple', action="store_true", help="process multiple files")
# my one argument with argparse is that if you run the app without args it doesn't show help info, so
# this will achieve that...
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
# this actually runs the argument parser on sys.argv
args = parser.parse_args()
flist = []
if args.in_fname is not None and os.path.isfile(args.in_fname):
flist.append(args.in_fname)
elif args.multiple is True and args.in_fname is None:
for f in os.listdir("."):
if ".atdf" in f:
flist.append(f)
if len(flist) >= 1:
for fname in flist:
        # The following creates an empty list. As the XML is parsed this will be append()'d to, to build a complete
# picture of the AVR layout as a list of dictionaries
mainlist = []
# Assuming the user has given a name then do our thing!
if fname is not None and os.path.isfile(fname):
# the user has the opportunity to use -o to set an output filename but if they haven't used that this
# takes the input .atdf/.xml filename and replaces the extension with ".h" to create the output name
if args.out_name is None:
out_name = os.path.splitext(fname)[0]+".h"
if args.multiple is True or args.quiet is not True:
print("Creating:", out_name)
# ===================================== PART 1 (process the XML) ======================================
# following two lines are the classic way to invoke ElementTree to read an XML then get access to the
# root from which access to all other data then occurs
tree = et.parse(fname)
root = tree.getroot()
# So the first thing I search for is the "memory-segment" entry with attribute name='MAPPED_IO'
# this has the start/length of the SFRs (start is bound to be 0x20, it's the length I need to
# later build the struct that covers the entire SFR region)
io_node = root.find(".//memory-segment[@name='MAPPED_IO']")
sfr_start = io_node.attrib['start']
sfr_size = io_node.attrib['size']
# The "interesting stuff" (as far as this program is concerned) is then the "modules" which are things
# like "UART", "TIMER_COUNTER_1" and so on.
modules = root.find("modules")
# this then iterates over each module found...
for mod in modules.findall("module"):
# rather curiously there are two "modules" called "FUSE" and "LOCKBIT" - I want to ignore them...
if mod.attrib['name'] in ['FUSE', 'LOCKBIT']:
continue
# To keep the user entertained with some output - print the name of each module as I find it...
if not args.quiet:
print("===============", mod.attrib['name'], "===============")
# Now there's a load of data for each "module" that I'm not interested in. All I want are the registers
# and bits and these appear under one or more "register-groups"
rg = mod.find("register-group")
# then in each register group iterate through the individual registers..
for reg in rg.findall("register"):
# for each register pick out the useful bits of information
addr = int(reg.attrib['offset'], 0)
name = reg.attrib['name']
capt = reg.attrib['caption']
if capt is None:
capt = "missing caption"
sz = int(reg.attrib['size'])
# only used for whole registers - some are not full 8 or 16 bits
try:
main_mask = int(reg.attrib['mask'], 0)
except KeyError:
if sz == 1:
main_mask = 0xFF
elif sz == 2:
main_mask = 0xFFFF
# use the following to add extra detail if more than one byte involved
xtra = ""
if sz != 1:
xtra = str(sz) + " bytes"
if not args.quiet:
print(name, "addr=", hex(addr), xtra, "// ", capt)
# Have a look to see if there is a "bitfield" defined for this register
bits = reg.findall("bitfield")
# going to create a list of tuples (eventually sorted and duplicates removed) for any groups of bits
bitlist = []
if len(bits) != 0:
# if there is/are bitfields then work through each entry in turn
for bit in bits:
# int(x, 0) converts "0xNN" into an integer value - unfortunately the XML only holds these
# as a "mask" so if it's bits 3,4,5,6 the mask will be 0x78 and so on - need to process this
# later to get the lowest bit (3 in this example) and the starting bit position. For the
# lowest bit set I found something useful on StackOverflow which is at the start of this file
# for the number of bits set in a mask I found a very clever technique in another answer
# on StackOverflow - you use bit() to convert the mask to a "10101.." string then use count('1')
# on this to find out how many bits there are (as it happens they're always adjacent in fact)
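                            # e.g. a mask of 0x78 (0b01111000) gives lowestSet() == 3 and bin(0x78).count('1') == 4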
mask = int(bit.attrib['mask'], 0)
try:
captn = bit.attrib['caption']
except KeyError:
captn = "caption missing"
try:
lsb = int(bit.attrib['lsb'])
except KeyError:
lsb = 0
numbits = bin(mask).count('1')
# @@@ need to consider non-adjacent bits in a mask such as 0xB0 (10110000) - e.g. mega16 SM bits
# <thinks> - split the bits in a "multi-mask" at this stage and make multiple 1 bit appends
# rather than trying to unwind this later.
# OK change of plan - because entries like the SM bits have a mask of 0xB0 (10110000) with non-
# adjacent bits then don't just store the low bit and numbits, but iterate through the bitmask now
# and add individual :1 entries for multiple bits (adding the lsb suffix and an incrementing count)
if numbits == 1:
bitinfo = namedtuple("bitinfo", "bitpos name caption")
bitinfo.bitpos = lowestSet(mask)
bitinfo.name = bit.attrib['name']
bitinfo.caption = captn
bitlist.append(bitinfo)
else:
suffix = lsb # this starts at 0 if there is no lsb=
bitpos = 0
while numbits:
while mask & (1 << bitpos) == 0:
bitpos += 1
bitinfo = namedtuple("bitinfo", "bitpos name caption")
bitinfo.bitpos = bitpos
bitpos += 1
bitinfo.name = bit.attrib['name'] + str(suffix)
suffix += 1
bitinfo.caption = captn
bitlist.append(bitinfo)
numbits -= 1
if not args.quiet:
for n in sorted(bitlist, key=lambda x: x.bitpos):
print(n.name, "bit=" + str(n.bitpos), n.caption)
# now we assume we are going to this register/bits as a whole new entry in mainlist[]. However it turns
# out the XML may have several different register/bit definitions in different places for the same
# register - but you can spot this because you have already seen the address used. (BTW this all occurs
# with a register like TIMSK that may have some TMR_0 bits defined under the section for "TIMER 0" but
# then has some more bits defined later under "TIMER 1" and so on)
do_add = 1
# so now we check to see if the address of the register we're currently looking at was already seen
# and recorded in mainlist[]. If it has been then what we need to do is extract the existing "bitlist"
# (which is a list of tuples), then append each new tuple we've just found to this. However that may
# lead to duplicates.
for n in mainlist:
if n['addr'] == addr:
# so pull the "bits" from the existing entry that was found to have the same address
updated_bits = n['bits']
# then append each new bit entry tuple to this..
for entry in bitlist:
do_add = 1
for eb in updated_bits:
if entry.bitpos == eb.bitpos:
do_add = 0
if do_add:
updated_bits.append(entry)
# I'll leave this (one of my development print()s as I found it MOST useful!)
# print("YOIKS!", "now=", sorted(nodups))
                            # now search the entire mainlist[] again to find the index (i) of the one where we found the
# existing entry for the same address
for i in range(0, len(mainlist)):
# and when we stumble upon it..
if mainlist[i]['addr'] == addr:
# replace what was there with new details including the sorted, duplicate removed list of bits
mainlist[i] = {'addr': addr, 'name': name, 'size': sz, 'main_mask': main_mask, 'caption': capt, 'bits': sorted(updated_bits, key=lambda x: x.bitpos)}
# as we've updated an existing entry we don't want to add the data as a new one so..
do_add = 0
# if the address has not occurred before then just add the details including the sorted list of bits -
# it sort by default on the first item in the tuple which is the bit position
if do_add:
mainlist.append({'addr': addr, 'name': name, 'size': sz, 'main_mask': main_mask, 'caption': capt, 'bits': sorted(bitlist, key=lambda x: x.bitpos)})
# The order of the "modules" in the XML is arbitrary and does not follow address order so now we sort the mainlist
# of dictionaries using the 'addr' field in each one. Again this clever technique came from Stack Overflow
mainlist = sorted(mainlist, key=lambda k: k['addr'])
# finally entertain the user with something interesting looking (good for debugging too!)
if args.verbose:
print("\n++++++++++ All of that data from XML now stored as.... +++++++++++\n")
for ent in mainlist:
print(hex(ent['addr']), ent)
# ===================================== PART 2 (generate the output) ======================================
# So we arrive here with mainlist[] fully populated with the complete info of all registers/bits stripped
        # from the XML and the list is now ordered by address. Now it's time to generate some output
regidx = 0
# remember the "MAPPED_IO" which was the first thing taken from the XML - this is where we use the sfr_start/size
# we pulled from it at that time
addr = int(sfr_start, 0)
# this is just standard Python file IO - open xxx.h as a writable text file..
hdr = open(out_name, "wt")
# the preamble is a fixed text so write that now...
hdr.write("#include <stdint.h>\n\ntypedef struct {\n")
# now we build a struct that will span sfr_start to sfr_start+sfr_size (remember int(x,0) converts "0xNN" to int)
# Oh and if you are wondering why this is a while() loop and not a for() loop it's because in Python (I found
# out the hard way!) you cannot use "for x in range(start,end)" and the modify x within the loop to skip some values
# the range() builds a list at the very start and x will be set to every member in that list for each iteration
# of the loop - updates to the iteration variable are over-written!
while addr < (int(sfr_start, 0) + int(sfr_size,0)):
# now for each address in the SFR range we see if the next mainlist[] entry has something for it
            # first pull the mainlist entries into more readable variable names:
main_addr = mainlist[regidx]['addr']
byts = int(mainlist[regidx]['size'])
uint_sz = byts * 8
main_mask = int(mainlist[regidx]['main_mask'])
regbits = bin(main_mask).count('1')
name = mainlist[regidx]['name'] # .lower()
caption = mainlist[regidx]['caption']
if main_addr == addr:
# if here then this address has an entry in mainlist[] so now the question is "is it a whole register or
# is it a group of bits?". Whole registers are things like PORTB, ADC, OCR0 and so on, while registers with
# (named) bits are things like UCSRA, TWCR and so on. For a whole register with anonymous bits then
# just generate something like:
#
# union {
# uint8_t reg; // (@ 0x32) Port D Data Register
# struct {
# unsigned int b0:1;
# unsigned int b1:1;
# unsigned int b2:1;
# unsigned int b3:1;
# unsigned int b4:1;
# unsigned int b5:1;
# unsigned int b6:1;
# unsigned int b7:1;
# } bit;
# } _PORTD;
#
# while for a register with named bits generate something like:
#
# union {
# uint8_t reg; // (@ 0x2e) SPI Status Register
# struct {
# int _SPI2X:1; // b0 Double SPI Speed Bit
# int :5; // b1 - unused
# int _WCOL:1; // b6 Write Collision Flag
# int _SPIF:1; // b7 SPI Interrupt Flag
# } bit;
# } _SPSR;
#
#
# If it is a whole register then it might be more than one byte so need to decide to uint8_t, uint16_t
# and so on (I'm hoping they haven't got one defined as 'size':3 because that would lead to uint24_t
# as I just multiply by 8!!)
whole_reg = len(mainlist[regidx]['bits']) == 0
if args.nounion:
if whole_reg:
if regbits == 8 or regbits == 16:
hdr.write("\tuint" + str(uint_sz) + "_t " + name + "; // (@ " + str(hex(addr)) + ") " + caption)
else:
hdr.write("\tunsigned int " + name + ":" + str(regbits) + "; // (@ " + str(hex(addr)) + ") " + caption)
else:
hdr.write("\tstruct { // (@ " + str(hex(addr)) + ") " + caption + "\n")
else:
if regbits == 8 or regbits == 16:
hdr.write("\tunion {\n\t\tuint" + str(uint_sz) + "_t reg; // (@ " + str(hex(addr)) + ") " + caption + "\n\t\tstruct {\n")
else:
hdr.write("\tunion {\n\t\tunsigned int reg:" + str(regbits) + "; // (@ " + str(hex(addr)) + ") " + caption + " (range: 0.." + str((1 << regbits) - 1) + ") \n\t\tstruct {\n")
# now for a whole register just write bN fields for the number of bits there are
if whole_reg and not args.nounion:
for b in range(0, bin(main_mask).count('1')):
hdr.write("\t\t\tunsigned int b" + str(b) + ":1;\n")
else:
# So this is the complicated bit when there are named bits defined
bitpos = 0
for b in mainlist[regidx]['bits']:
# We have tuples like (2, 5, 'FOO') which means FOO is at bit position 2 and spans 5 bits but
# some of the structs have "gaps" that are unused and we need to fill these with padding
# the gap is padded using the following...
if b.bitpos > bitpos:
nskip = b.bitpos - bitpos
hdr.write("\t\t\tunsigned int :" + str(b.bitpos - bitpos) + "; // b" + str(bitpos))
if nskip > 1:
hdr.write("...b" + str(b.bitpos - 1))
hdr.write(" - unused\n")
                    # and step bitpos on to the bit position of the entry we're about to write
bitpos = b.bitpos
# then the actual named "FOO:5" entry is created by this...
hdr.write("\t\t\tunsigned int _" + b.name + ":1; // b" + str(b.bitpos) + " " + b.caption + "\n")
bitpos += 1 # b.numbits
if args.nounion:
if not whole_reg:
hdr.write("\t} " + name + ";\n")
else:
hdr.write("\n")
else:
if args.anon:
hdr.write("\t\t};\n\t} _" + name + ";\n")
else:
if uint_sz == 8:
hdr.write("\t\t} bit;\n\t} _" + name + ";\n")
else:
# just assume/handle uint16_t for now..
hdr.write("\t\t} bit;\n")
hdr.write("\t\tstruct {\n")
hdr.write("\t\t\tuint8_t low;\n")
if regbits == 16:
hdr.write("\t\t\tuint8_t high;\n")
else:
hdr.write("\t\t\tunsigned int high:" + str(regbits - 8) + ";\n")
hdr.write("\t\t} halves;\n")
hdr.write("\t} _" + name + ";\n")
# following adds 0 for size:1 entries but is mainly here for multi-byte entries so that addr can be
# stepped on for uint16_t registers and so on
addr += byts - 1
# now step the mainlist[] index on to the next entry
regidx += 1
# this may look "odd" but it prevents regidx trying to index beyond the end of mainlist and setting it
# to 1 is benign as "addr" has already moved on so there's no chance of it matching as it's now "behind"
# (hope that makes sense!)
if regidx >= len(mainlist):
regidx = 1
else:
# this just writes an unused0xNN entry for each byte that has nothing in mainlist[]
hdr.write("\tuint8_t unused" + str(hex(addr)) + ";\n")
addr += 1
# then just finish with the closing part of the struct{} definition and we're all done! :-)
        # BTW I wanted to call the whole thing "AVR" not "SFRS" but the compiler already defines "AVR"
hdr.write("} SFRS_t;\n\n#define USE_SFRS() volatile SFRS_t * const pSFR = (SFRS_t *)" + sfr_start + "\n\n")
# now to make some easier to type/read symbols that actually hide some of that implementation
for entry in mainlist:
name = entry['name']
hdr.write("/* ================= (" + name + ") " + entry['caption'] + " ================ */\n")
hdr.write("#define " + name.lower() + " pSFR->_" + name + ".reg\n")
if len(entry['bits']) == 0:
for n in range(0, bin(entry['main_mask']).count('1')):
hdr.write("#define " + name.lower() + "_b" + str(n) + " pSFR->_" + name + ".bit.b" + str(n) + "\n")
# assume it's uint16_t so there are two "halves" too:
if int(entry['size']) > 1:
hdr.write("#define " + name.lower() + "l pSFR->_" + name + ".halves.low\n")
hdr.write("#define " + name.lower() + "h pSFR->_" + name + ".halves.high\n")
else:
for bit in entry['bits']:
bitname = bit.name.lower()
hdr.write("#define " + name.lower() + "_" + bitname + " pSFR->_" + name + ".bit._" + bitname.upper() + "\n")
for bit in entry['bits']:
bitname = bit.name.lower()
suffix = ""
if name == "SREG":
suffix = "_flag"
hdr.write("#define " + bitname + suffix + " (1 << " + str(bit.bitpos) + ")\n")
for bit in entry['bits']:
bitname = bit.name.lower()
hdr.write("#define " + bitname + "_bp " + str(bit.bitpos) + "\n")
hdr.write("\n")
hdr.close()
else:
print("No valid input file")
| mit | -7,481,435,471,951,080,000 | 61.111111 | 201 | 0.443971 | false |
sorrison/django-pbs | django_pbs/jobs/views.py | 1 | 1355 | # Copyright 2008 VPAC
#
# This file is part of django-pbs.
#
# django-pbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# django-pbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-pbs If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse, Http404
from django_pbs.jobs.models import Job
from django_pbs.servers.models import Server
from django_pbs import serializers
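# job_id arrives as "<sequence>.<server>"; split it into the numeric id and the PBS
# server name, then render the job as HTML (default) or as serialized XML when xml=True.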
def job_detail(request, job_id, server_id=None, xml=False):
id, server = job_id.split('.', 1)
try:
job = Job(Server(server), id)
except:
raise Http404
if xml:
return HttpResponse(serializers.serialize('xml', [job], indent=True), mimetype='text/xml')
return render_to_response('pbs_jobs/job_detail.html', locals(), context_instance=RequestContext(request))
| gpl-3.0 | -4,826,663,300,770,279,000 | 34.657895 | 109 | 0.739483 | false |