repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
---|---|---|---|---|
ridfrustum/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/contrib/webdesign/lorem_ipsum.py | 439 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = u', '.join(sections)
# Convert to sentence case and add end punctuation.
return u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return u' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return u' '.join(word_list)
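# A minimal usage sketch of the helpers above (illustrative only; sentence() and
# paragraph() output is random):
if __name__ == '__main__':
    print words(7)                     # u'lorem ipsum dolor sit amet consectetur adipisicing'
    print sentence()                   # capitalized, ends in '.' or '?'
    print u'\n\n'.join(paragraphs(2))  # COMMON_P followed by one random paragraph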
|
sgiavasis/nipype | refs/heads/master | nipype/interfaces/minc/tests/test_auto_Beast.py | 7 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..minc import Beast
def test_Beast_inputs():
input_map = dict(abspath=dict(argstr='-abspath',
usedefault=True,
),
args=dict(argstr='%s',
),
clobber=dict(argstr='-clobber',
usedefault=True,
),
confidence_level_alpha=dict(argstr='-alpha %s',
),
configuration_file=dict(argstr='-configuration %s',
),
environ=dict(nohash=True,
usedefault=True,
),
fill_holes=dict(argstr='-fill',
),
flip_images=dict(argstr='-flip',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
input_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
library_dir=dict(argstr='%s',
mandatory=True,
position=-3,
),
load_moments=dict(argstr='-load_moments',
),
median_filter=dict(argstr='-median',
),
nlm_filter=dict(argstr='-nlm_filter',
),
number_selected_images=dict(argstr='-selection_num %s',
),
output_file=dict(argstr='%s',
hash_files=False,
name_source=['input_file'],
name_template='%s_beast_mask.mnc',
position=-1,
),
patch_size=dict(argstr='-patch_size %s',
),
probability_map=dict(argstr='-probability',
),
same_resolution=dict(argstr='-same_resolution',
),
search_area=dict(argstr='-search_area %s',
),
smoothness_factor_beta=dict(argstr='-beta %s',
),
terminal_output=dict(nohash=True,
),
threshold_patch_selection=dict(argstr='-threshold %s',
),
voxel_size=dict(argstr='-voxel_size %s',
),
)
inputs = Beast.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Beast_outputs():
output_map = dict(output_file=dict(),
)
outputs = Beast.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
juhnowski/FishingRod | refs/heads/master | production/pygsl-0.9.5/examples/integrate.py | 1 | #!/usr/bin/env python
# Author : Pierre Schnizer
"""
Integration of sin(x)/x from -infinity to + infinity.
Here the combined usage of qawf and qagp is shown. Qagp is used to integrate
across the removable singularity at x = 0, and qawf is used to integrate
towards infinity. The exact value of the integral is pi.
"""
from pygsl import integrate
from pygsl import _numobj as numx
def run():
def f2(x,y):
return numx.sin(x) / x
sys2 = integrate.gsl_function(f2, None)
def f1(x,y):
return 1 / x
sys1 = integrate.gsl_function(f1, None)
def f3(x,y):
return 1 / -x
sys3 = integrate.gsl_function(f3, None)
w = integrate.workspace(1000000)
cyclew = integrate.workspace(1000000)
table1 = integrate.qawo_table(1, 100, integrate.SINE, 100)
table2 = integrate.qawo_table(-1, 100, integrate.SINE, 100)
# Borders and singularity for qagp
pts = numx.array((-numx.pi, 0, numx.pi))
flag, result1, error = integrate.qagp(sys2, pts, 1e-8, 1e-8, 100000, w)
flag, result2, error = integrate.qawf(sys1, numx.pi, 1e-8, 100, w,
cyclew, table1)
flag, result3, error = integrate.qawf(sys3, numx.pi, 1e-8, 100, w,
cyclew, table2)
print "Result of integration is :", result1 + result2 + result3
if __name__ == '__main__':
run()
|
Oire/twython | refs/heads/master | examples/get_user_timeline.py | 9 | from twython import Twython, TwythonError
# Requires Authentication as of Twitter API v1.1
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
try:
user_timeline = twitter.get_user_timeline(screen_name='ryanmcgrath')
except TwythonError as e:
print e
print user_timeline
|
thomaspatzke/EQUEL | refs/heads/master | equel/plugins/search.py | 1 | # Search Plugins
from .generic import BasePlugin, GenericPlugin, BaseShortcutPlugin, EQUELPluginException
class BaseSearchPlugin(GenericPlugin):
"""Search specific plugin attributes"""
# Allow return value of plugin to be used as search filter, e.g. in further search subexpressions
filterable = False
class GenericSearchPlugin(BaseSearchPlugin):
"""Convert EQUEL into JSON and wrap into query attribute"""
name = "Generic Search Plugin"
description = "Generic EQUEL to JSON conversion with wrapping into query attribute"
filterable = True
def apply(self, verb, params, parser, ctx):
return super().apply(verb, params, parser, ctx)
class ESQueryStringPlugin(BaseSearchPlugin):
"""Convert Elasticsearch query string into Query DSL structure"""
name = "Elasticsearch Query String Plugin"
description = "Convert Elasticsearch query string into Query DSL structure"
def apply(self, verb, query, parser, ctx):
return { "query": { "query_string": { "query": query } } }
class SearchShortcutPlugin(BaseShortcutPlugin):
"""
Converts given value into a query_string query. Prefixes:
':': Use ES default as default operator (currently OR)
'&': AND as default operator
"""
name = "Search shortcut plugin"
description = "Convert value into query_string query"
filterable = True
def apply(self, prefix, value, parser, ctx):
res = { "query_string": { "query": value } }
if prefix == "&":
res["query_string"]["default_operator"] = "AND"
return res
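# Sketch of the query fragments apply() above emits for a value of "foo bar":
#   prefix ":" -> { "query_string": { "query": "foo bar" } }
#   prefix "&" -> { "query_string": { "query": "foo bar", "default_operator": "AND" } }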
class SortPlugin(BaseSearchPlugin):
"""
Sort entries by addition of a 'sort' query option. Parameter s contains one or multiple field names.
If suffixed with + or -, the sort order is set explicitly (asc/desc).
"""
name = "Search result sort plugin"
description = "Sort search results"
def __init__(self):
super().__init__()
self.sortfields = list()
def appendField(self, field):
if field.endswith("+"):
self.sortfields.append({ field[:-1] : { "order": "asc" } })
if field.endswith("-"):
self.sortfields.append({ field[:-1] : { "order": "desc" } })
else:
self.sortfields.append(field)
def apply(self, verb, params, parser, ctx):
try:
fields = params["unnamed_list"]
except KeyError:
raise EQUELPluginException("Expression 'sort' requires list of fields")
if len(fields) == 0:
raise EQUELPluginException("List of fields of sort expression must not be empty")
elif type(fields[0]) == list:
raise EQUELPluginException("Only one list of fields in sort expression is allowed")
for field in fields:
self.appendField(field)
return { "sort": self.sortfields }
class FieldFilterPlugin(BaseSearchPlugin):
"""
Filter fields from search result. Parameters:
[field,...]: include these fields
exclude=[field,...]: exclude these fields
"""
name = "Filter fields from search result plugin"
description = "Filters fields from search result documents"
def apply(self, verb, params, parser, ctx):
try:
include = params["unnamed_list"]
except KeyError:
include = None
try:
exclude = params["exclude"]
except KeyError:
exclude = None
if (include and len(include) > 0 and type(include[0]) == list) or (exclude and len(exclude) > 0 and type(exclude[0]) == list):
raise EQUELPluginException("Only one list of fields in fields expression is allowed")
if not include and not exclude:
return {}
filters = dict()
if include:
filters["includes"] = include
if exclude:
filters["excludes"] = exclude
return { "_source": filters }
class NestQueryPlugin(BaseSearchPlugin):
"""Wrap current query into nested query"""
name = "Nest current query"
description = "Wraps current query into nested query"
# TODO: make it filterable - last created ES DSL expression has to be stored somewhere
def apply(self, verb, params, parser, ctx):
if 'path' not in params:
raise EQUELPluginException("Search subquery 'nest' requires path parameter")
query = parser.query['query']
query = { 'nested': { 'path': params['path'], 'query': query } }
parser.query['query'] = query
return {}
class ScriptQueryPlugin(BaseSearchPlugin):
"""Perform a script query"""
name = "Script query"
description = "Perform a script query (default: painless)"
filterable = True
def apply(self, verb, params, parser, ctx):
if 'unnamed' not in params:
raise EQUELPluginException("Search subquery 'script' requires a script as unnamed parameter")
script = params['unnamed']
try:
lang = params['lang']
except KeyError:
lang = "painless"
return { "script": { "script": { "lang": lang, "inline": script } } }
class ScriptFieldPlugin(BaseSearchPlugin):
"""Add fields that are calculated by scripts"""
name = "Script field"
description = "Add fields that are calculated by script code (default: painless)"
def apply(self, verb, params, parser, ctx):
try:
lang = params['_lang']
except KeyError:
lang = "painless"
res = { "script_fields": dict() }
for param in params:
if param.key == '_lang':
continue
res['script_fields'][param.key] = { "script": { "lang": lang, "inline": param.value } }
# Normally script_fields makes _source and friends disappear. Bring _source back unless only script fields were explicitly requested or the query already defines fields.
if 'onlyscriptfields' not in params and 'fields' not in parser.query:
parser.query['stored_fields'] = [ '_source' ]
return res
class TimeRangePlugin(BaseSearchPlugin):
"""
Restricts search to given time range by adding an according filter to the query.
Parameters:
* from: start time
* to: end time (default: now)
* field: field used for time range filtering (default: @timestamp)
Times are expected as:
* Absolute: formats supported in Arrow default configuration.
* Relative: EQUEL relative time references:
* -: relative start to given end time
* +: relative end to given start time
* ~ in from time: set start/end time around given end time
Supported units are: s(econds), min(utes), h(ours), d(ays), w(eeks), m(onths), y(ears)
"""
name = "Time Range"
description = "Restrict query to time range"
def apply(self, verb, params, parser, ctx):
from equel.engine import EQUELTimeRange
if 'from' not in params:
raise EQUELPluginException("Time range filter at least requires a start time in the from parameter")
start = params['from']
try:
end = params['to']
except KeyError:
end = None
try:
field = params['field']
except KeyError:
field = "@timestamp"
tr = EQUELTimeRange(start, end, field=field)
parser.query = tr.wrapQuery(parser.query)
return {}
|
zitouni/gnuradio-3.6.1 | refs/heads/master | gnuradio-core/src/python/gnuradio/gr/qa_stream_mux.py | 18 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_stream_mux (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def help_stream_2ff(self, N, stream_sizes):
v0 = gr.vector_source_f(N*[1,], False)
v1 = gr.vector_source_f(N*[2,], False)
mux = gr.stream_mux(gr.sizeof_float, stream_sizes)
dst = gr.vector_sink_f ()
self.tb.connect (v0, (mux,0))
self.tb.connect (v1, (mux,1))
self.tb.connect (mux, dst)
self.tb.run ()
return dst.data ()
def help_stream_ramp_2ff(self, N, stream_sizes):
r1 = range(N)
r2 = range(N)
r2.reverse()
v0 = gr.vector_source_f(r1, False)
v1 = gr.vector_source_f(r2, False)
mux = gr.stream_mux(gr.sizeof_float, stream_sizes)
dst = gr.vector_sink_f ()
self.tb.connect (v0, (mux,0))
self.tb.connect (v1, (mux,1))
self.tb.connect (mux, dst)
self.tb.run ()
return dst.data ()
def test_stream_2NN_ff(self):
N = 40
stream_sizes = [10, 10]
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0)
self.assertEqual (exp_data, result_data)
def test_stream_ramp_2NN_ff(self):
N = 40
stream_sizes = [10, 10]
result_data = self.help_stream_ramp_2ff(N, stream_sizes)
exp_data = ( 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
39.0, 38.0, 37.0, 36.0, 35.0, 34.0, 33.0, 32.0, 31.0, 30.0,
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
29.0, 28.0, 27.0, 26.0, 25.0, 24.0, 23.0, 22.0, 21.0, 20.0,
20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
19.0, 18.0, 17.0, 16.0, 15.0, 14.0, 13.0, 12.0, 11.0, 10.0,
30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0)
self.assertEqual (exp_data, result_data)
def test_stream_2NM_ff(self):
N = 40
stream_sizes = [7, 9]
self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0)
self.assertEqual (exp_data, result_data)
def test_stream_2MN_ff(self):
N = 37
stream_sizes = [7, 9]
self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
2.0)
self.assertEqual (exp_data, result_data)
def test_stream_2N0_ff(self):
N = 30
stream_sizes = [7, 0]
self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0)
self.assertEqual (exp_data, result_data)
def test_stream_20N_ff(self):
N = 30
stream_sizes = [0, 9]
self.help_stream_2ff(N, stream_sizes)
result_data = self.help_stream_2ff(N, stream_sizes)
exp_data = (2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0)
self.assertEqual (exp_data, result_data)
if __name__ == '__main__':
gr_unittest.run(test_stream_mux, "test_stream_mux.xml")
|
sport-monkey/GYP | refs/heads/master | test/standalone/gyptest-standalone.py | 314 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a project hierarchy created with the --generator-output=
option can be built even when it's relocated to a different path.
"""
import TestGyp
import os
test = TestGyp.TestGyp()
test.run_gyp('standalone.gyp', '-Gstandalone')
# Look at all the files in the tree to make sure none
# of them reference the gyp file.
for root, dirs, files in os.walk("."):
for file in files:
# ignore ourself
if os.path.splitext(__file__)[0] in file:
continue
file = os.path.join(root, file)
contents = open(file).read()
if 'standalone.gyp' in contents:
print 'gyp file referenced in generated output: %s' % file
test.fail_test()
test.pass_test()
|
cngo-github/nupic | refs/heads/master | examples/opf/experiments/classification/category_SP_1/description.py | 32 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/category_SP_1.csv'),
'modelParams': { 'clParams': { 'clVerbosity': 0},
'inferenceType': 'NontemporalClassification',
'sensorParams': { 'encoders': { }, 'verbosity': 0},
'spParams': { },
'tpEnable': False,
'tpParams': { }}}
mod = importBaseDescription('../base_category/description.py', config)
locals().update(mod.__dict__)
|
bollig/pscf | refs/heads/master | tools/python/GDoc/file_util.py | 4 | #****M root/file_util ---------------------------------------------
# MODULE
# file_util
# PURPOSE
# Utility functions for manipulating files and paths
#*** --------------------------------------------------------------
import os
from os.path import *
#****f file_util/relative_path -----------------------------------
# FUNCTION
# relative_path(path1,path2)
# PURPOSE
# Generates relative path of path2 relative to path1.
# ARGUMENTS
# path1 and path2 are paths relative to a common directory
# RETURN
# Path for file2 relative to directory containing path1
#*** ---------------------------------------------------------------
def relative_path(path1,path2):
root = commonprefix([path1,path2])
root = dirname(root)
if (root == '/'):
raise Exception('Error in relative_path - common directory cannot be / ')
if root :
path2 = path2[len(root)+1:]
path1 = dirname(path1)
while not ( root == path1 ):
path2 = '../' + path2
path1 = dirname(path1)
return path2
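# Worked example for the function above: with the common directory 'docs',
#   relative_path('docs/a/index.html', 'docs/b/file.html')
# returns '../b/file.html' (path2 relative to the directory containing path1).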
#****f file_util/chdirs -------------------------------------------
# FUNCTION
# chdirs(dir)
# PURPOSE
# Change current working directory to dir (like os.chdir), and
# create dir and any necessary parents if it does not yet exist.
#*** --------------------------------------------------------------
def chdirs(dir):
if not exists(dir):
print 'Creating directory ' + dir
os.makedirs(dir)
os.chdir(dir)
#****f file_util/open_w -------------------------------------------
# FUNCTION
# open_w(path)
# PURPOSE
# Open file with specified path for writing, return file object.
# Similar to built-in function open(path,'w'), except that open_w
# will create any non-existent directories in the path.
# RETURN
# file object with specified path, opened for writing
#*** --------------------------------------------------------------
def open_w(path):
dir = dirname(path)
if dir:
if not exists(dir):
print 'Creating directory ' + dir
os.makedirs(dir)
return open(path,'w')
#****f file_util/rename -------------------------------------------
# FUNCTION
# rename(old_path,new_path)
# ARGUMENTS
# old_path - path of file or directory to be renamed
# new_path - new path for the file or directory
# PURPOSE
# Rename a file or directory. Similar to os.rename, but will
# create any non-existent directories in the new path.
# RETURN
# None. The file or directory at old_path is moved to new_path.
#*** --------------------------------------------------------------
def rename(old_path,new_path):
if (not isfile(old_path)) and (not isdir(old_path) ):
print 'Path ' + old_path + ' is not a file or directory'
return
new_dir = dirname(new_path)
if new_dir:
if not exists(new_dir):
print 'Creating directory ' + new_dir
os.makedirs(new_dir)
return os.rename(old_path,new_path)
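# A minimal usage sketch of open_w and rename above (paths are illustrative):
#   f = open_w('out/docs/index.html')    # creates out/docs/ if needed
#   f.write('...')
#   f.close()
#   rename('out/docs/index.html', 'out/html/index.html')   # creates out/html/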
|
gorczynski/dotfiles | refs/heads/master | vim/bundle/powerline/tools/purge-PRs.py | 27 | #!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import argparse
from getpass import getpass
from github import Github
p = argparse.ArgumentParser(description='Powerline release script')
p.add_argument('-u', '--user', type=str, metavar='USER', help='Github username.', required=True)
p.add_argument('-p', '--password', type=str, metavar='PASS', help='Github password. You will be prompted if it is not supplied.')
if __name__ == '__main__':
args = p.parse_args()
user = args.user
password = args.password or getpass('Password for {0}: '.format(user))
gh = Github(user, password)
grepo = gh.get_repo('powerline/powerline')
for pr in grepo.get_pulls():
if pr.base.ref != 'develop':
issue = grepo.get_issue(pr.number)
issue.create_comment('PRs to any branch, but develop, are not accepted.', )
issue.add_to_labels('s:invalid')
issue.edit(state='closed')
|
rjw57/rbc | refs/heads/master | test/test_switch.py | 1 | def test_basic_switch(check_output):
check_output('''
main() {
extrn putchar;;
auto i;
i = 0; while(i <= 4) {
describe(i);
putchar('*n');
++i;
}
}
describe(val) {
extrn putstr, putnumb;
putnumb(val); putstr(" is ");
switch(val) {
case 0: putstr("zero"); break;
case 1: putstr("one"); break;
default: putstr("many"); break;
}
}
''', '0 is zero\n1 is one\n2 is many\n3 is many\n4 is many\n')
def test_fallthrough(check_output):
check_output('''
main() {
extrn putchar;;
auto i;
i = 0; while(i <= 4) {
describe(i);
putchar('*n');
++i;
}
}
describe(val) {
extrn putstr, putnumb;
putnumb(val); putstr(" is ");
switch(val) {
case 0: putstr("zero");
case 1: putstr("one"); break;
default: putstr("many");
}
}
''', '0 is zeroone\n1 is one\n2 is many\n3 is many\n4 is many\n')
|
paolodedios/tensorflow | refs/heads/master | tensorflow/python/keras/layers/pooling.py | 5 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pooling layers."""
import functools
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
class Pooling1D(Layer):
"""Pooling layer for arbitrary pooling functions, for 1D inputs.
This class only exists for code reuse. It will never be an exposed API.
Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(Pooling1D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=3)
def call(self, inputs):
pad_axis = 2 if self.data_format == 'channels_last' else 3
inputs = array_ops.expand_dims(inputs, pad_axis)
outputs = self.pool_function(
inputs,
self.pool_size + (1,),
strides=self.strides + (1,),
padding=self.padding,
data_format=self.data_format)
return array_ops.squeeze(outputs, pad_axis)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
steps = input_shape[2]
features = input_shape[1]
else:
steps = input_shape[1]
features = input_shape[2]
length = conv_utils.conv_output_length(steps,
self.pool_size[0],
self.padding,
self.strides[0])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([input_shape[0], features, length])
else:
return tensor_shape.TensorShape([input_shape[0], length, features])
def get_config(self):
config = {
'strides': self.strides,
'pool_size': self.pool_size,
'padding': self.padding,
'data_format': self.data_format,
}
base_config = super(Pooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
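# A minimal sketch (illustrative, using tf.nn directly) of the trick in
# Pooling1D.call above: 1D pooling is done by inserting a dummy spatial axis,
# applying a 2D pooling function, and squeezing the axis back out.
#
#   x = tf.reshape(tf.constant([1., 2., 3., 4., 5.]), [1, 5, 1])  # (batch, steps, features)
#   y = tf.nn.max_pool2d(tf.expand_dims(x, 2), ksize=[2, 1],
#                        strides=[2, 1], padding='VALID')
#   y = tf.squeeze(y, 2)   # equals MaxPooling1D(pool_size=2)(x): [[[2.], [4.]]]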
@keras_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D')
class MaxPooling1D(Pooling1D):
"""Max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over a
spatial window of size `pool_size`. The window is shifted by `strides`. The
resulting output, when using the `"valid"` padding option, has a shape of:
`output_shape = (input_shape - pool_size + 1) / strides`
The resulting output shape when using the `"same"` padding option is:
`output_shape = input_shape / strides`
For example, for `strides=1` and `padding="valid"`:
>>> x = tf.constant([1., 2., 3., 4., 5.])
>>> x = tf.reshape(x, [1, 5, 1])
>>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding='valid')
>>> max_pool_1d(x)
<tf.Tensor: shape=(1, 4, 1), dtype=float32, numpy=
array([[[2.],
[3.],
[4.],
[5.]]], dtype=float32)>
For example, for `strides=2` and `padding="valid"`:
>>> x = tf.constant([1., 2., 3., 4., 5.])
>>> x = tf.reshape(x, [1, 5, 1])
>>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,
... strides=2, padding='valid')
>>> max_pool_1d(x)
<tf.Tensor: shape=(1, 2, 1), dtype=float32, numpy=
array([[[2.],
[4.]]], dtype=float32)>
For example, for `strides=1` and `padding="same"`:
>>> x = tf.constant([1., 2., 3., 4., 5.])
>>> x = tf.reshape(x, [1, 5, 1])
>>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,
... strides=1, padding='same')
>>> max_pool_1d(x)
<tf.Tensor: shape=(1, 5, 1), dtype=float32, numpy=
array([[[2.],
[3.],
[4.],
[5.],
[5.]]], dtype=float32)>
Args:
pool_size: Integer, size of the max pooling window.
strides: Integer, or None. Specifies how much the pooling window moves
for each pooling step.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
"""
def __init__(self, pool_size=2, strides=None,
padding='valid', data_format='channels_last', **kwargs):
super(MaxPooling1D, self).__init__(
functools.partial(backend.pool2d, pool_mode='max'),
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
@keras_export('keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D')
class AveragePooling1D(Pooling1D):
"""Average pooling for temporal data.
Downsamples the input representation by taking the average value over the
window defined by `pool_size`. The window is shifted by `strides`. The
resulting output when using "valid" padding option has a shape of:
`output_shape = (input_shape - pool_size + 1) / strides`
The resulting output shape when using the "same" padding option is:
`output_shape = input_shape / strides`
For example, for strides=1 and padding="valid":
>>> x = tf.constant([1., 2., 3., 4., 5.])
>>> x = tf.reshape(x, [1, 5, 1])
>>> x
<tf.Tensor: shape=(1, 5, 1), dtype=float32, numpy=
array([[[1.],
[2.],
[3.],
[4.],
[5.]]], dtype=float32)>
>>> avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2,
... strides=1, padding='valid')
>>> avg_pool_1d(x)
<tf.Tensor: shape=(1, 4, 1), dtype=float32, numpy=
array([[[1.5],
[2.5],
[3.5],
[4.5]]], dtype=float32)>
For example, for strides=2 and padding="valid":
>>> x = tf.constant([1., 2., 3., 4., 5.])
>>> x = tf.reshape(x, [1, 5, 1])
>>> x
<tf.Tensor: shape=(1, 5, 1), dtype=float32, numpy=
array([[[1.],
[2.],
[3.],
[4.],
[5.]]], dtype=float32)>
>>> avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2,
... strides=2, padding='valid')
>>> avg_pool_1d(x)
<tf.Tensor: shape=(1, 2, 1), dtype=float32, numpy=
array([[[1.5],
[3.5]]], dtype=float32)>
For example, for strides=1 and padding="same":
>>> x = tf.constant([1., 2., 3., 4., 5.])
>>> x = tf.reshape(x, [1, 5, 1])
>>> x
<tf.Tensor: shape=(1, 5, 1), dtype=float32, numpy=
array([[[1.],
[2.],
[3.],
[4.],
[5.]]], dtype=float32)>
>>> avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2,
... strides=1, padding='same')
>>> avg_pool_1d(x)
<tf.Tensor: shape=(1, 5, 1), dtype=float32, numpy=
array([[[1.5],
[2.5],
[3.5],
[4.5],
[5.]]], dtype=float32)>
Args:
pool_size: Integer, size of the average pooling windows.
strides: Integer, or None. Factor by which to downscale.
E.g. 2 will halve the input.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
"""
def __init__(self, pool_size=2, strides=None,
padding='valid', data_format='channels_last', **kwargs):
super(AveragePooling1D, self).__init__(
functools.partial(backend.pool2d, pool_mode='avg'),
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
class Pooling2D(Layer):
"""Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images).
This class only exists for code reuse. It will never be an exposed API.
Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format=None,
name=None, **kwargs):
super(Pooling2D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def call(self, inputs):
if self.data_format == 'channels_last':
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
else:
pool_shape = (1, 1) + self.pool_size
strides = (1, 1) + self.strides
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper(),
data_format=conv_utils.convert_data_format(self.data_format, 4))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
else:
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.pool_size[0], self.padding,
self.strides[0])
cols = conv_utils.conv_output_length(cols, self.pool_size[1], self.padding,
self.strides[1])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
else:
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(Pooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D')
class MaxPooling2D(Pooling2D):
"""Max pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output,
when using the `"valid"` padding option, has a spatial shape
(number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
For example, for `strides=(1, 1)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='valid')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
array([[[[5.],
[6.]],
[[8.],
[9.]]]], dtype=float32)>
For example, for `strides=(2, 2)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = tf.reshape(x, [1, 3, 4, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(2, 2), padding='valid')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 1, 2, 1), dtype=float32, numpy=
array([[[[6.],
[8.]]]], dtype=float32)>
Usage Example:
>>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]],
... [[2.], [2.], [3.], [2.]],
... [[4.], [1.], [1.], [1.]],
... [[2.], [2.], [1.], [4.]]]])
>>> output = tf.constant([[[[1], [0]],
... [[0], [1]]]])
>>> model = tf.keras.models.Sequential()
>>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... input_shape=(4, 4, 1)))
>>> model.compile('adam', 'mean_squared_error')
>>> model.predict(input_image, steps=1)
array([[[[2.],
[4.]],
[[4.],
[4.]]]], dtype=float32)
For example, for `strides=(1, 1)` and `padding="same"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='same')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
array([[[[5.],
[6.],
[6.]],
[[8.],
[9.],
[9.]],
[[8.],
[9.],
[9.]]]], dtype=float32)>
Args:
pool_size: integer or tuple of 2 integers,
window size over which to take the maximum.
`(2, 2)` will take the max value over a 2x2 pooling window.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values. Specifies how far the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
Returns:
A tensor of rank 4 representing the maximum pooled values. See above for
output shape.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling2D, self).__init__(
nn.max_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
@keras_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D')
class AveragePooling2D(Pooling2D):
"""Average pooling operation for spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using `"valid"` padding option has a shape
(number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
For example, for `strides=(1, 1)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='valid')
>>> avg_pool_2d(x)
<tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
array([[[[3.],
[4.]],
[[6.],
[7.]]]], dtype=float32)>
For example, for `strides=(2, 2)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = tf.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding='valid')
>>> avg_pool_2d(x)
<tf.Tensor: shape=(1, 1, 2, 1), dtype=float32, numpy=
array([[[[3.5],
[5.5]]]], dtype=float32)>
For example, for `strides=(1, 1)` and `padding="same"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = tf.keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='same')
>>> avg_pool_2d(x)
<tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
array([[[[3.],
[4.],
[4.5]],
[[6.],
[7.],
[7.5]],
[[7.5],
[8.5],
[9.]]]], dtype=float32)>
Args:
pool_size: integer or tuple of 2 integers,
factors by which to downscale (vertical, horizontal).
`(2, 2)` will halve the input in both spatial dimension.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values.
If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling2D, self).__init__(
nn.avg_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
class Pooling3D(Layer):
"""Pooling layer for arbitrary pooling functions, for 3D inputs.
This class only exists for code reuse. It will never be an exposed API.
Args:
pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)`
while `channels_first` corresponds to
inputs with shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
"""
def __init__(self, pool_function, pool_size, strides,
padding='valid', data_format='channels_last',
name=None, **kwargs):
super(Pooling3D, self).__init__(name=name, **kwargs)
if data_format is None:
data_format = backend.image_data_format()
if strides is None:
strides = pool_size
self.pool_function = pool_function
self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def call(self, inputs):
pool_shape = (1,) + self.pool_size + (1,)
strides = (1,) + self.strides + (1,)
if self.data_format == 'channels_first':
# TF does not support `channels_first` with 3D pooling operations,
# so we must handle this case manually.
# TODO(fchollet): remove this when TF pooling is feature-complete.
inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))
outputs = self.pool_function(
inputs,
ksize=pool_shape,
strides=strides,
padding=self.padding.upper())
if self.data_format == 'channels_first':
outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
len_dim1 = input_shape[2]
len_dim2 = input_shape[3]
len_dim3 = input_shape[4]
else:
len_dim1 = input_shape[1]
len_dim2 = input_shape[2]
len_dim3 = input_shape[3]
len_dim1 = conv_utils.conv_output_length(len_dim1, self.pool_size[0],
self.padding, self.strides[0])
len_dim2 = conv_utils.conv_output_length(len_dim2, self.pool_size[1],
self.padding, self.strides[1])
len_dim3 = conv_utils.conv_output_length(len_dim3, self.pool_size[2],
self.padding, self.strides[2])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], len_dim1, len_dim2, len_dim3])
else:
return tensor_shape.TensorShape(
[input_shape[0], len_dim1, len_dim2, len_dim3, input_shape[4]])
def get_config(self):
config = {
'pool_size': self.pool_size,
'padding': self.padding,
'strides': self.strides,
'data_format': self.data_format
}
base_config = super(Pooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.MaxPool3D', 'keras.layers.MaxPooling3D')
class MaxPooling3D(Pooling3D):
"""Max pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
Args:
pool_size: Tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
input_channels = 3
inputs = tf.keras.Input(shape=(depth, height, width, input_channels))
layer = tf.keras.layers.MaxPooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling3D, self).__init__(
nn.max_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
@keras_export('keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D')
class AveragePooling3D(Pooling3D):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
Args:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
input_channels = 3
inputs = tf.keras.Input(shape=(depth, height, width, input_channels))
layer = tf.keras.layers.AveragePooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling3D, self).__init__(
nn.avg_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
class GlobalPooling1D(Layer):
"""Abstract class for different global pooling 1D layers."""
def __init__(self, data_format='channels_last', keepdims=False, **kwargs):
super(GlobalPooling1D, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.data_format = conv_utils.normalize_data_format(data_format)
self.keepdims = keepdims
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if self.keepdims:
return tensor_shape.TensorShape([input_shape[0], input_shape[1], 1])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
else:
if self.keepdims:
return tensor_shape.TensorShape([input_shape[0], 1, input_shape[2]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[2]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format, 'keepdims': self.keepdims}
base_config = super(GlobalPooling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling1D',
'keras.layers.GlobalAvgPool1D')
class GlobalAveragePooling1D(GlobalPooling1D):
"""Global average pooling operation for temporal data.
Examples:
>>> input_shape = (2, 3, 4)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.GlobalAveragePooling1D()(x)
>>> print(y.shape)
(2, 4)
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
keepdims: A boolean, whether to keep the temporal dimension or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the temporal dimension is retained with
length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(batch_size, steps)` indicating whether
a given step should be masked (excluded from the average).
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, features)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, 1, features)`
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, 1)`
"""
def __init__(self, data_format='channels_last', **kwargs):
super(GlobalAveragePooling1D, self).__init__(data_format=data_format,
**kwargs)
self.supports_masking = True
def call(self, inputs, mask=None):
steps_axis = 1 if self.data_format == 'channels_last' else 2
if mask is not None:
mask = math_ops.cast(mask, inputs[0].dtype)
mask = array_ops.expand_dims(
mask, 2 if self.data_format == 'channels_last' else 1)
inputs *= mask
return backend.sum(
inputs, axis=steps_axis,
keepdims=self.keepdims) / math_ops.reduce_sum(
mask, axis=steps_axis, keepdims=self.keepdims)
else:
return backend.mean(inputs, axis=steps_axis, keepdims=self.keepdims)
def compute_mask(self, inputs, mask=None):
return None
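# Illustrative sketch (comment added for clarity, not part of the original
# module): when a mask is propagated, e.g. by a `Masking` layer, the layer
# above averages only over the unmasked timesteps.
#
#   inputs = tf.keras.Input(shape=(None, 8))             # variable-length sequences
#   x = tf.keras.layers.Masking(mask_value=0.)(inputs)   # mask all-zero timesteps
#   pooled = tf.keras.layers.GlobalAveragePooling1D()(x) # shape (batch, 8)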
@keras_export('keras.layers.GlobalMaxPool1D', 'keras.layers.GlobalMaxPooling1D')
class GlobalMaxPooling1D(GlobalPooling1D):
"""Global max pooling operation for 1D temporal data.
Downsamples the input representation by taking the maximum value over
the time dimension.
For example:
>>> x = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> x = tf.reshape(x, [3, 3, 1])
>>> x
<tf.Tensor: shape=(3, 3, 1), dtype=float32, numpy=
array([[[1.], [2.], [3.]],
[[4.], [5.], [6.]],
[[7.], [8.], [9.]]], dtype=float32)>
>>> max_pool_1d = tf.keras.layers.GlobalMaxPooling1D()
>>> max_pool_1d(x)
<tf.Tensor: shape=(3, 1), dtype=float32, numpy=
array([[3.],
[6.],
[9.]], dtype=float32)>
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
keepdims: A boolean, whether to keep the temporal dimension or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the temporal dimension is retained with
length 1.
The behavior is the same as for `tf.reduce_max` or `np.max`.
Input shape:
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, features)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
3D tensor with shape `(batch_size, 1, features)`
- If `data_format='channels_first'`:
3D tensor with shape `(batch_size, features, 1)`
"""
def call(self, inputs):
steps_axis = 1 if self.data_format == 'channels_last' else 2
return backend.max(inputs, axis=steps_axis, keepdims=self.keepdims)
class GlobalPooling2D(Layer):
"""Abstract class for different global pooling 2D layers.
"""
def __init__(self, data_format=None, keepdims=False, **kwargs):
super(GlobalPooling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
self.keepdims = keepdims
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
if self.keepdims:
return tensor_shape.TensorShape([input_shape[0], 1, 1, input_shape[3]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[3]])
else:
if self.keepdims:
return tensor_shape.TensorShape([input_shape[0], input_shape[1], 1, 1])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format, 'keepdims': self.keepdims}
base_config = super(GlobalPooling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling2D',
'keras.layers.GlobalAvgPool2D')
class GlobalAveragePooling2D(GlobalPooling2D):
"""Global average pooling operation for spatial data.
Examples:
>>> input_shape = (2, 4, 5, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.GlobalAveragePooling2D()(x)
>>> print(y.shape)
(2, 3)
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, 1, 1, channels)`
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, 1, 1)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2], keepdims=self.keepdims)
else:
return backend.mean(inputs, axis=[2, 3], keepdims=self.keepdims)
@keras_export('keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPooling2D')
class GlobalMaxPooling2D(GlobalPooling2D):
"""Global max pooling operation for spatial data.
Examples:
>>> input_shape = (2, 4, 5, 3)
>>> x = tf.random.normal(input_shape)
>>> y = tf.keras.layers.GlobalMaxPool2D()(x)
>>> print(y.shape)
(2, 3)
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_max` or `np.max`.
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, 1, 1, channels)`
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, 1, 1)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.max(inputs, axis=[1, 2], keepdims=self.keepdims)
else:
return backend.max(inputs, axis=[2, 3], keepdims=self.keepdims)
class GlobalPooling3D(Layer):
"""Abstract class for different global pooling 3D layers."""
def __init__(self, data_format=None, keepdims=False, **kwargs):
super(GlobalPooling3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
self.keepdims = keepdims
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
if self.keepdims:
return tensor_shape.TensorShape(
[input_shape[0], 1, 1, 1, input_shape[4]])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[4]])
else:
if self.keepdims:
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], 1, 1, 1])
else:
return tensor_shape.TensorShape([input_shape[0], input_shape[1]])
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {'data_format': self.data_format, 'keepdims': self.keepdims}
base_config = super(GlobalPooling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.GlobalAveragePooling3D',
'keras.layers.GlobalAvgPool3D')
class GlobalAveragePooling3D(GlobalPooling3D):
"""Global Average pooling operation for 3D data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
5D tensor with shape `(batch_size, 1, 1, 1, channels)`
- If `data_format='channels_first'`:
5D tensor with shape `(batch_size, channels, 1, 1, 1)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.mean(inputs, axis=[1, 2, 3], keepdims=self.keepdims)
else:
return backend.mean(inputs, axis=[2, 3, 4], keepdims=self.keepdims)
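# Illustrative shape check (comment added for clarity, mirroring the 1D/2D
# docstring examples above):
#   x = tf.random.normal((2, 4, 5, 4, 3))                  # channels_last input
#   tf.keras.layers.GlobalAveragePooling3D()(x).shape      # -> (2, 3)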
@keras_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D')
class GlobalMaxPooling3D(GlobalPooling3D):
"""Global Max pooling operation for 3D data.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
keepdims: A boolean, whether to keep the spatial dimensions or not.
If `keepdims` is `False` (default), the rank of the tensor is reduced
for spatial dimensions.
If `keepdims` is `True`, the spatial dimensions are retained with
length 1.
The behavior is the same as for `tf.reduce_max` or `np.max`.
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `keepdims`=False:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims`=True:
- If `data_format='channels_last'`:
5D tensor with shape `(batch_size, 1, 1, 1, channels)`
- If `data_format='channels_first'`:
5D tensor with shape `(batch_size, channels, 1, 1, 1)`
"""
def call(self, inputs):
if self.data_format == 'channels_last':
return backend.max(inputs, axis=[1, 2, 3], keepdims=self.keepdims)
else:
return backend.max(inputs, axis=[2, 3, 4], keepdims=self.keepdims)
# Aliases
AvgPool1D = AveragePooling1D
MaxPool1D = MaxPooling1D
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
AvgPool3D = AveragePooling3D
MaxPool3D = MaxPooling3D
GlobalMaxPool1D = GlobalMaxPooling1D
GlobalMaxPool2D = GlobalMaxPooling2D
GlobalMaxPool3D = GlobalMaxPooling3D
GlobalAvgPool1D = GlobalAveragePooling1D
GlobalAvgPool2D = GlobalAveragePooling2D
GlobalAvgPool3D = GlobalAveragePooling3D
|
zorroblue/scikit-learn | refs/heads/master | examples/svm/plot_svm_scale_c.py | 60 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1}^{n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different numbers of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size,
n_splits=250, random_state=1))
grid.fit(X, y)
scores = grid.cv_results_['mean_test_score']
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
|
minhphung171093/GreenERP_V7 | refs/heads/master | openerp/addons/account_payment/__openerp__.py | 54 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Suppliers Payment Management',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Module to manage the payment of your supplier invoices.
=======================================================
This module allows you to create and manage your payment orders, whose purposes are to
------------------------------------------------------------------------------------------
* serve as a base for easily plugging in various automated payment mechanisms.
* provide a more efficient way to manage invoice payments.
Warning:
~~~~~~~~
The confirmation of a payment order does _not_ create accounting entries, it just
records the fact that you gave your payment order to your bank. The booking of
your order must be encoded as usual through a bank statement. Indeed, it's only
when you get the confirmation from your bank that your order has been accepted
that you can book it in your accounting. To help you with that operation, you
have a new option to import payment orders as bank statement lines.
""",
'images': ['images/payment_mode.jpeg','images/payment_order.jpeg'],
'depends': ['account','account_voucher'],
'data': [
'security/account_payment_security.xml',
'security/ir.model.access.csv',
'wizard/account_payment_pay_view.xml',
'wizard/account_payment_populate_statement_view.xml',
'wizard/account_payment_create_order_view.xml',
'account_payment_view.xml',
'account_payment_workflow.xml',
'account_payment_sequence.xml',
'account_invoice_view.xml',
'account_payment_report.xml',
],
'demo': ['account_payment_demo.xml'],
'test': [
'test/account_payment_demo.yml',
'test/cancel_payment_order.yml',
'test/payment_order_process.yml',
'test/account_payment_report.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
WSDC-NITWarangal/gunicorn | refs/heads/master | examples/frameworks/flaskapp.py | 41 | # Run with:
#
# $ gunicorn flaskapp:app
#
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
|
opengeogroep/inasafe | refs/heads/master | safe_qgis/test/test_qgis_environment.py | 2 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**ISClipper test suite.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from safe.common.testing import get_qgis_app
__author__ = '[email protected]'
__date__ = '20/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import unittest
from qgis.core import (
QgsProviderRegistry,
QgsCoordinateReferenceSystem,
QgsRasterLayer)
from safe_qgis.safe_interface import EXPDATA
QGIS_APP = get_qgis_app()
class QGISTest(unittest.TestCase):
"""Test the QGIS Environment"""
def test_QGISEnvironment(self):
"""QGIS environment has the expected providers"""
r = QgsProviderRegistry.instance()
#for item in r.providerList():
# print str(item)
#print 'Provider count: %s' % len(r.providerList())
assert 'gdal' in r.providerList()
assert 'ogr' in r.providerList()
assert 'postgres' in r.providerList()
#assert 'wfs' in r.providerList()
def testProjInterpretation(self):
"""Test that QGIS properly parses a proj4 string.
see https://github.com/AIFDR/inasafe/issues/349
"""
myCrs = QgsCoordinateReferenceSystem()
myProj4 = (
'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",'
'SPHEROID["WGS_1984",6378137.0,298.257223563]],'
'PRIMEM["Greenwich",0.0],UNIT["Degree",'
'0.0174532925199433]]')
myCrs.createFromWkt(myProj4)
myAuthId = myCrs.authid()
myExpectedAuthId = 'EPSG:4326'
self.assertEqual(myAuthId, myExpectedAuthId)
# now test for a loaded layer
path = os.path.join(EXPDATA, 'glp10ag.asc')
myTitle = 'people'
layer = QgsRasterLayer(path, myTitle)
myAuthId = layer.crs().authid()
self.assertEqual(myAuthId, myExpectedAuthId)
if __name__ == '__main__':
unittest.main()
|
IvanGavran/scrapy | refs/heads/master | scrapy/http/__init__.py | 207 | """
Module containing all HTTP related classes
Use this module (instead of the more specific ones) when importing Headers,
Request and Response outside this module.
"""
from scrapy.http.headers import Headers
from scrapy.http.request import Request
from scrapy.http.request.form import FormRequest
from scrapy.http.request.rpc import XmlRpcRequest
from scrapy.http.response import Response
from scrapy.http.response.html import HtmlResponse
from scrapy.http.response.xml import XmlResponse
from scrapy.http.response.text import TextResponse
|
komarudin02/Public | refs/heads/master | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
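# Example (illustrative comment, not part of the original module):
#
#   XmlToString(['a', {'x': '1'}, ['b', 'hi']])
#   => '<?xml version="1.0" encoding="utf-8"?><a x="1"><b>hi</b></a>'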
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
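# Example (illustrative comment):
#   WriteXmlIfChanged(['Project', {'Name': 'demo'}], 'demo.xml', pretty=True)
# rewrites demo.xml only when its current contents differ from the generated XML.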
_xml_escape_map = {
'"': '&quot;',
"'": '&apos;',
'<': '&lt;',
'>': '&gt;',
'&': '&amp;',
'\n': '&#xA;',
'\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
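# Example (illustrative comment):
#   _XmlEscape('a < b & "c"') returns 'a &lt; b &amp; &quot;c&quot;';
#   with attr=True, single quotes are left untouched.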
|
orenov/phraug | refs/heads/master | csv2vw.py | 4 | """
Convert CSV file to vw format. Headers can be skipped with argv[4] == true.
Use -1 for label index if there no labels in the input file
phraug2 version has an option to ignore columns:
https://github.com/zygmuntz/phraug2/blob/master/csv2vw.py
"""
import sys
import csv
def construct_line( label, line ):
new_line = []
if float( label ) == 0.0:
label = "0"
new_line.append( "%s |n " % ( label ))
for i, item in enumerate( line ):
if float( item ) == 0.0:
continue # sparse!!!
new_item = "%s:%s" % ( i + 1, item )
new_line.append( new_item )
new_line = " ".join( new_line )
new_line += "\n"
return new_line
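# Illustrative example (comment added for clarity):
#   construct_line( "1", [ "0", "2.5", "0", "3" ] ) returns
#   "1 |n  2:2.5 4:3\n" (double space after "|n" comes from the label token) --
#   zero-valued features are skipped and the rest are written as 1-based
#   index:value pairs after the VW label/namespace prefix.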
# ---
input_file = sys.argv[1]
output_file = sys.argv[2]
try:
label_index = int( sys.argv[3] )
except IndexError:
label_index = 0
try:
skip_headers = sys.argv[4]
except IndexError:
skip_headers = 0
i = open( input_file )
o = open( output_file, 'w' )
reader = csv.reader( i )
if skip_headers:
headers = reader.next()
n = 0
for line in reader:
if label_index == -1:
label = 1
else:
label = line.pop( label_index )
new_line = construct_line( label, line )
o.write( new_line )
n += 1
if n % 10000 == 0:
print n
|
Livit/Livit.Learn.EdX | refs/heads/labster/develop | common/djangoapps/third_party_auth/tests/specs/test_twitter.py | 86 | """
Separate integration test for Twitter which is an OAuth1 provider.
"""
from mock import patch
from third_party_auth.tests.specs import base
class TwitterIntegrationTest(base.Oauth2IntegrationTest):
"""Integration tests for Twitter backend."""
def setUp(self):
super(TwitterIntegrationTest, self).setUp()
self.provider = self.configure_twitter_provider(
enabled=True,
key='twitter_oauth1_key',
secret='twitter_oauth1_secret',
)
# To test an OAuth1 provider, we need to patch an additional method:
patcher = patch(
'social.backends.twitter.TwitterOAuth.unauthorized_token',
create=True,
return_value="unauth_token"
)
patcher.start()
self.addCleanup(patcher.stop)
TOKEN_RESPONSE_DATA = {
'access_token': 'access_token_value',
'token_type': 'bearer',
}
USER_RESPONSE_DATA = {
'id': 10101010,
'name': 'Bob Loblaw',
'description': 'A Twitter User',
'screen_name': 'bobloblaw',
'location': 'Twitterverse',
'followers_count': 77,
'verified': False,
}
def get_username(self):
response_data = self.get_response_data()
return response_data.get('screen_name')
|
jeremykid/FunAlgorithm | refs/heads/master | faceRegonization/sample.py | 1 | # -*- coding: utf-8 -*-
import urllib2
import urllib
import time
http_url = 'https://api-us.faceplusplus.com/facepp/v3/detect'
key = "Bn7swhGpvtzxS9uMWG-0CkacJY-_gt-4"
secret = "tNCF8Wd-xjtw-qyQn47yjZh8RzLkVBkU"
filepath = r"./sample.jpg"
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_key')
data.append(key)
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_secret')
data.append(secret)
data.append('--%s' % boundary)
# print (data)
# '''
fr=open(filepath,'rb')
data.append('Content-Disposition: form-data; name="%s"; filename="co33.jpg"' % 'image_file')
data.append('Content-Type: %s\r\n' % 'application/octet-stream')
data.append(fr.read())
fr.close()
# data.append('Content-Disposition: form-data; name="%s"\r\n' % 'return_attributes')
# data.append('gender')
data.append('--%s--\r\n' % boundary)
http_body='\r\n'.join(data)
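# Rough layout of the multipart body assembled above (illustrative comment;
# parts are CRLF-joined, and the extra \r\n inside each header string yields
# the blank line required before a part's value):
#
#   --<boundary>
#   Content-Disposition: form-data; name="api_key"
#
#   <api key>
#   ...
#   --<boundary>--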
# build the http request
req=urllib2.Request(http_url)
#header
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
req.add_data(http_body)
try:
#post data to server
resp = urllib2.urlopen(req, timeout=5)
#get response
qrcont=resp.read()
print qrcont
except Exception,e:
print 'http error',e
|
zhounanshu/flasky | refs/heads/master | app/auth/forms.py | 111 | from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class RegistrationForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
username = StringField('Username', validators=[
Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
password = PasswordField('Password', validators=[
Required(), EqualTo('password2', message='Passwords must match.')])
password2 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ChangePasswordForm(Form):
old_password = PasswordField('Old password', validators=[Required()])
password = PasswordField('New password', validators=[
Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm new password', validators=[Required()])
submit = SubmitField('Update Password')
class PasswordResetRequestForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
submit = SubmitField('Reset Password')
class PasswordResetForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('New Password', validators=[
Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('Reset Password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class ChangeEmailForm(Form):
email = StringField('New Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[Required()])
submit = SubmitField('Update Email Address')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
|
Charimon/complex_input | refs/heads/master | node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
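# Example (illustrative comment):
#   ParseTarget('foo/bar.gyp:mytarget#host') == ('foo/bar.gyp', 'mytarget', 'host')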
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
show targets in |targets| and their depedendents."""
file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
AnimeshSinha1309/Website-Edunet | refs/heads/master | WebsiteEdunet/env/Lib/site-packages/django/contrib/sites/shortcuts.py | 615 | from __future__ import unicode_literals
from django.apps import apps
def get_current_site(request):
"""
Checks if contrib.sites is installed and returns either the current
``Site`` object or a ``RequestSite`` object based on the request.
"""
# Imports are inside the function because its point is to avoid importing
# the Site models when django.contrib.sites isn't installed.
if apps.is_installed('django.contrib.sites'):
from .models import Site
return Site.objects.get_current(request)
else:
from .requests import RequestSite
return RequestSite(request)
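# Minimal usage sketch (illustrative comment; `my_view` and the HttpResponse
# import are hypothetical, not part of this module):
#
#   from django.http import HttpResponse
#
#   def my_view(request):
#       site = get_current_site(request)
#       return HttpResponse("You are visiting %s" % site.domain)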
|
woozzu/pylearn2 | refs/heads/master | pylearn2/models/tests/test_autoencoder.py | 32 | """
Tests for the pylearn2 autoencoder module.
"""
import os.path
import numpy as np
import theano
import theano.tensor as tensor
from theano import config
from pylearn2.models.autoencoder import Autoencoder, \
HigherOrderContractiveAutoencoder, DeepComposedAutoencoder, \
UntiedAutoencoder, StackedDenoisingAutoencoder
from pylearn2.corruption import BinomialCorruptor
from pylearn2.config import yaml_parse
from theano.tensor.basic import _allclose
yaml_dir_path = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__))), 'config')
def test_autoencoder_properly_initialized():
ae = Autoencoder(1, 1, 'sigmoid', 'linear')
assert hasattr(ae, 'fn'), "Autoencoder didn't call Block.__init__"
assert hasattr(ae, 'extensions'), "Autoencoder didn't call Model.__init__"
def test_autoencoder_logistic_linear_tied():
data = np.random.randn(10, 5).astype(config.floatX)
ae = Autoencoder(5, 7, act_enc='sigmoid', act_dec='linear',
tied_weights=True)
w = ae.weights.get_value()
ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
hb = ae.hidbias.get_value()
ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
vb = ae.visbias.get_value()
d = tensor.matrix()
result = np.dot(1. / (1 + np.exp(-hb - np.dot(data, w))), w.T) + vb
ff = theano.function([d], ae.reconstruct(d))
assert _allclose(ff(data), result)
def test_autoencoder_tanh_cos_untied():
data = np.random.randn(10, 5).astype(config.floatX)
ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',
tied_weights=False)
w = ae.weights.get_value()
w_prime = ae.w_prime.get_value()
ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
hb = ae.hidbias.get_value()
ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
vb = ae.visbias.get_value()
d = tensor.matrix()
result = np.cos(np.dot(np.tanh(hb + np.dot(data, w)), w_prime) + vb)
ff = theano.function([d], ae.reconstruct(d))
assert _allclose(ff(data), result)
def test_high_order_autoencoder_init():
"""
Just test that model initialize and return
the penalty without error.
"""
corruptor = BinomialCorruptor(corruption_level=0.5)
model = HigherOrderContractiveAutoencoder(
corruptor=corruptor,
num_corruptions=2,
nvis=5,
nhid=7,
act_enc='sigmoid',
act_dec='sigmoid')
X = tensor.matrix()
data = np.random.randn(10, 5).astype(config.floatX)
ff = theano.function([X], model.higher_order_penalty(X))
assert type(ff(data)) == np.ndarray
def test_cae_basic():
"""
Tests that we can load a contractive autoencoder
and train it for a few epochs (without saving) on a dummy
dataset-- tiny model and dataset
"""
with open(os.path.join(yaml_dir_path, 'cae.yaml')) as f:
yaml_string = f.read()
train = yaml_parse.load(yaml_string)
train.main_loop()
def test_hcae_basic():
"""
Tests that we can load a higher order contractive autoencoder
and train it for a few epochs (without saving) on a dummy
dataset-- tiny model and dataset
"""
with open(os.path.join(yaml_dir_path, 'hcae.yaml')) as f:
yaml_string = f.read()
train = yaml_parse.load(yaml_string)
train.main_loop()
def test_untied_ae():
"""
Tests that UntiedAutoencoder calls the Model superclass constructor
"""
ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',
tied_weights=True)
model = UntiedAutoencoder(ae)
model._ensure_extensions()
def test_dcae():
"""
Tests that DeepComposedAutoencoder works correctly
"""
ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',
tied_weights=True)
model = DeepComposedAutoencoder([ae])
model._ensure_extensions()
data = np.random.randn(10, 5).astype(config.floatX)
model.perform(data)
def test_sdae():
"""
Tests that StackedDenoisingAutoencoder works correctly
"""
data = np.random.randn(10, 5).astype(config.floatX) * 100
ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',
tied_weights=False)
corruptor = BinomialCorruptor(corruption_level=0.5)
model = StackedDenoisingAutoencoder([ae], corruptor)
model._ensure_extensions()
w = ae.weights.get_value()
w_prime = ae.w_prime.get_value()
ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
hb = ae.hidbias.get_value()
ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
vb = ae.visbias.get_value()
d = tensor.matrix()
result = np.cos(np.dot(np.tanh(hb + np.dot(data, w)), w_prime) + vb)
ff = theano.function([d], model.reconstruct(d))
assert not _allclose(ff(data), result)
|
Etxea/gestioneide | refs/heads/master | asistencias/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
kenshay/ImageScript | refs/heads/master | ProgramData/SystemFiles/Python/Lib/test/test_httpservers.py | 5 | """Unittests for the various HTTPServer modules.
Written by Cody A.W. Somerville <[email protected]>,
Josip Dzolonga, and Michael Otteneder for the 2007/08 GHOP contest.
"""
import os
import sys
import re
import base64
import ntpath
import shutil
import urllib
import httplib
import tempfile
import unittest
import CGIHTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from CGIHTTPServer import CGIHTTPRequestHandler
from StringIO import StringIO
from test import test_support
threading = test_support.import_module('threading')
class NoLogRequestHandler:
def log_message(self, *args):
# don't write log messages to stderr
pass
class SocketlessRequestHandler(SimpleHTTPRequestHandler):
def __init__(self):
self.get_called = False
self.protocol_version = "HTTP/1.1"
def do_GET(self):
self.get_called = True
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(b'<html><body>Data</body></html>\r\n')
def log_message(self, fmt, *args):
pass
class TestServerThread(threading.Thread):
def __init__(self, test_object, request_handler):
threading.Thread.__init__(self)
self.request_handler = request_handler
self.test_object = test_object
def run(self):
self.server = HTTPServer(('', 0), self.request_handler)
self.test_object.PORT = self.server.socket.getsockname()[1]
self.test_object.server_started.set()
self.test_object = None
try:
self.server.serve_forever(0.05)
finally:
self.server.server_close()
def stop(self):
self.server.shutdown()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
os.environ = test_support.EnvironmentVarGuard()
self.server_started = threading.Event()
self.thread = TestServerThread(self, self.request_handler)
self.thread.start()
self.server_started.wait()
def tearDown(self):
self.thread.stop()
os.environ.__exit__()
test_support.threading_cleanup(*self._threads)
def request(self, uri, method='GET', body=None, headers={}):
self.connection = httplib.HTTPConnection('localhost', self.PORT)
self.connection.request(method, uri, body, headers)
return self.connection.getresponse()
class BaseHTTPRequestHandlerTestCase(unittest.TestCase):
"""Test the functionality of the BaseHTTPServer focussing on
BaseHTTPRequestHandler.
"""
HTTPResponseMatch = re.compile('HTTP/1.[0-9]+ 200 OK')
def setUp (self):
self.handler = SocketlessRequestHandler()
def send_typical_request(self, message):
input_msg = StringIO(message)
output = StringIO()
self.handler.rfile = input_msg
self.handler.wfile = output
self.handler.handle_one_request()
output.seek(0)
return output.readlines()
def verify_get_called(self):
self.assertTrue(self.handler.get_called)
def verify_expected_headers(self, headers):
for fieldName in 'Server: ', 'Date: ', 'Content-Type: ':
self.assertEqual(sum(h.startswith(fieldName) for h in headers), 1)
def verify_http_server_response(self, response):
match = self.HTTPResponseMatch.search(response)
self.assertIsNotNone(match)
def test_http_1_1(self):
result = self.send_typical_request('GET / HTTP/1.1\r\n\r\n')
self.verify_http_server_response(result[0])
self.verify_expected_headers(result[1:-1])
self.verify_get_called()
self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
def test_http_1_0(self):
result = self.send_typical_request('GET / HTTP/1.0\r\n\r\n')
self.verify_http_server_response(result[0])
self.verify_expected_headers(result[1:-1])
self.verify_get_called()
self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
def test_http_0_9(self):
result = self.send_typical_request('GET / HTTP/0.9\r\n\r\n')
self.assertEqual(len(result), 1)
self.assertEqual(result[0], '<html><body>Data</body></html>\r\n')
self.verify_get_called()
def test_with_continue_1_0(self):
result = self.send_typical_request('GET / HTTP/1.0\r\nExpect: 100-continue\r\n\r\n')
self.verify_http_server_response(result[0])
self.verify_expected_headers(result[1:-1])
self.verify_get_called()
self.assertEqual(result[-1], '<html><body>Data</body></html>\r\n')
def test_request_length(self):
# Issue #10714: huge request lines are discarded, to avoid Denial
# of Service attacks.
result = self.send_typical_request(b'GET ' + b'x' * 65537)
self.assertEqual(result[0], b'HTTP/1.1 414 Request-URI Too Long\r\n')
self.assertFalse(self.handler.get_called)
class BaseHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
default_request_version = 'HTTP/1.1'
def do_TEST(self):
self.send_response(204)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
def do_KEEP(self):
self.send_response(204)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'keep-alive')
self.end_headers()
def do_KEYERROR(self):
self.send_error(999)
def do_CUSTOM(self):
self.send_response(999)
self.send_header('Content-Type', 'text/html')
self.send_header('Connection', 'close')
self.end_headers()
def do_SEND_ERROR(self):
self.send_error(int(self.path[1:]))
def do_HEAD(self):
self.send_error(int(self.path[1:]))
def setUp(self):
BaseTestCase.setUp(self)
self.con = httplib.HTTPConnection('localhost', self.PORT)
self.con.connect()
def test_command(self):
self.con.request('GET', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_request_line_trimming(self):
self.con._http_vsn_str = 'HTTP/1.1\n'
self.con.putrequest('XYZBOGUS', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_version_bogus(self):
self.con._http_vsn_str = 'FUBAR'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_version_digits(self):
self.con._http_vsn_str = 'HTTP/9.9.9'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_version_none_get(self):
self.con._http_vsn_str = ''
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_version_none(self):
# Test that a valid method is rejected when not HTTP/1.x
self.con._http_vsn_str = ''
self.con.putrequest('CUSTOM', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_version_invalid(self):
self.con._http_vsn = 99
self.con._http_vsn_str = 'HTTP/9.9'
self.con.putrequest('GET', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 505)
def test_send_blank(self):
self.con._http_vsn_str = ''
self.con.putrequest('', '')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
def test_header_close(self):
self.con.putrequest('GET', '/')
self.con.putheader('Connection', 'close')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_head_keep_alive(self):
self.con._http_vsn_str = 'HTTP/1.1'
self.con.putrequest('GET', '/')
self.con.putheader('Connection', 'keep-alive')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
def test_handler(self):
self.con.request('TEST', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 204)
def test_return_header_keep_alive(self):
self.con.request('KEEP', '/')
res = self.con.getresponse()
self.assertEqual(res.getheader('Connection'), 'keep-alive')
self.con.request('TEST', '/')
self.addCleanup(self.con.close)
def test_internal_key_error(self):
self.con.request('KEYERROR', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 999)
def test_return_custom_status(self):
self.con.request('CUSTOM', '/')
res = self.con.getresponse()
self.assertEqual(res.status, 999)
def test_send_error(self):
allow_transfer_encoding_codes = (205, 304)
for code in (101, 102, 204, 205, 304):
self.con.request('SEND_ERROR', '/{}'.format(code))
res = self.con.getresponse()
self.assertEqual(code, res.status)
self.assertEqual(None, res.getheader('Content-Length'))
self.assertEqual(None, res.getheader('Content-Type'))
if code not in allow_transfer_encoding_codes:
self.assertEqual(None, res.getheader('Transfer-Encoding'))
data = res.read()
self.assertEqual(b'', data)
def test_head_via_send_error(self):
allow_transfer_encoding_codes = (205, 304)
for code in (101, 200, 204, 205, 304):
self.con.request('HEAD', '/{}'.format(code))
res = self.con.getresponse()
self.assertEqual(code, res.status)
if code == 200:
self.assertEqual(None, res.getheader('Content-Length'))
self.assertIn('text/html', res.getheader('Content-Type'))
else:
self.assertEqual(None, res.getheader('Content-Length'))
self.assertEqual(None, res.getheader('Content-Type'))
if code not in allow_transfer_encoding_codes:
self.assertEqual(None, res.getheader('Transfer-Encoding'))
data = res.read()
self.assertEqual(b'', data)
class SimpleHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, SimpleHTTPRequestHandler):
pass
def setUp(self):
BaseTestCase.setUp(self)
self.cwd = os.getcwd()
basetempdir = tempfile.gettempdir()
os.chdir(basetempdir)
self.data = 'We are the knights who say Ni!'
self.tempdir = tempfile.mkdtemp(dir=basetempdir)
self.tempdir_name = os.path.basename(self.tempdir)
self.base_url = '/' + self.tempdir_name
temp = open(os.path.join(self.tempdir, 'test'), 'wb')
temp.write(self.data)
temp.close()
def tearDown(self):
try:
os.chdir(self.cwd)
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
finally:
BaseTestCase.tearDown(self)
def check_status_and_reason(self, response, status, data=None):
body = response.read()
self.assertTrue(response)
self.assertEqual(response.status, status)
self.assertIsNotNone(response.reason)
if data:
self.assertEqual(data, body)
def test_get(self):
#constructs the path relative to the root directory of the HTTPServer
response = self.request(self.base_url + '/test')
self.check_status_and_reason(response, 200, data=self.data)
# check for trailing "/" which should return 404. See Issue17324
response = self.request(self.base_url + '/test/')
self.check_status_and_reason(response, 404)
response = self.request(self.base_url + '/')
self.check_status_and_reason(response, 200)
response = self.request(self.base_url)
self.check_status_and_reason(response, 301)
response = self.request(self.base_url + '/?hi=2')
self.check_status_and_reason(response, 200)
response = self.request(self.base_url + '?hi=1')
self.check_status_and_reason(response, 301)
self.assertEqual(response.getheader("Location"),
self.base_url + "/?hi=1")
response = self.request('/ThisDoesNotExist')
self.check_status_and_reason(response, 404)
response = self.request('/' + 'ThisDoesNotExist' + '/')
self.check_status_and_reason(response, 404)
with open(os.path.join(self.tempdir_name, 'index.html'), 'w') as fp:
response = self.request(self.base_url + '/')
self.check_status_and_reason(response, 200)
# chmod() doesn't work as expected on Windows, and filesystem
# permissions are ignored by root on Unix.
if os.name == 'posix' and os.geteuid() != 0:
os.chmod(self.tempdir, 0)
response = self.request(self.base_url + '/')
self.check_status_and_reason(response, 404)
os.chmod(self.tempdir, 0755)
def test_head(self):
response = self.request(
self.base_url + '/test', method='HEAD')
self.check_status_and_reason(response, 200)
self.assertEqual(response.getheader('content-length'),
str(len(self.data)))
self.assertEqual(response.getheader('content-type'),
'application/octet-stream')
def test_invalid_requests(self):
response = self.request('/', method='FOO')
self.check_status_and_reason(response, 501)
# requests must be case sensitive, so this should fail too
response = self.request('/', method='custom')
self.check_status_and_reason(response, 501)
response = self.request('/', method='GETs')
self.check_status_and_reason(response, 501)
def test_path_without_leading_slash(self):
response = self.request(self.tempdir_name + '/test')
self.check_status_and_reason(response, 200, data=self.data)
response = self.request(self.tempdir_name + '/test/')
self.check_status_and_reason(response, 404)
response = self.request(self.tempdir_name + '/')
self.check_status_and_reason(response, 200)
response = self.request(self.tempdir_name)
self.check_status_and_reason(response, 301)
response = self.request(self.tempdir_name + '/?hi=2')
self.check_status_and_reason(response, 200)
response = self.request(self.tempdir_name + '?hi=1')
self.check_status_and_reason(response, 301)
self.assertEqual(response.getheader("Location"),
self.tempdir_name + "/?hi=1")
cgi_file1 = """\
#!%s
print "Content-type: text/html"
print
print "Hello World"
"""
cgi_file2 = """\
#!%s
import cgi
print "Content-type: text/html"
print
form = cgi.FieldStorage()
print "%%s, %%s, %%s" %% (form.getfirst("spam"), form.getfirst("eggs"),
form.getfirst("bacon"))
"""
cgi_file4 = """\
#!%s
import os
print("Content-type: text/html")
print("")
print(os.environ["%s"])
"""
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"This test can't be run reliably as root (issue #13308).")
class CGIHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, CGIHTTPRequestHandler):
pass
def setUp(self):
BaseTestCase.setUp(self)
self.parent_dir = tempfile.mkdtemp()
self.cgi_dir = os.path.join(self.parent_dir, 'cgi-bin')
self.cgi_child_dir = os.path.join(self.cgi_dir, 'child-dir')
os.mkdir(self.cgi_dir)
os.mkdir(self.cgi_child_dir)
# The shebang line should be pure ASCII: use symlink if possible.
# See issue #7668.
if hasattr(os, 'symlink'):
self.pythonexe = os.path.join(self.parent_dir, 'python')
os.symlink(sys.executable, self.pythonexe)
else:
self.pythonexe = sys.executable
self.nocgi_path = os.path.join(self.parent_dir, 'nocgi.py')
with open(self.nocgi_path, 'w') as fp:
fp.write(cgi_file1 % self.pythonexe)
os.chmod(self.nocgi_path, 0777)
self.file1_path = os.path.join(self.cgi_dir, 'file1.py')
with open(self.file1_path, 'w') as file1:
file1.write(cgi_file1 % self.pythonexe)
os.chmod(self.file1_path, 0777)
self.file2_path = os.path.join(self.cgi_dir, 'file2.py')
with open(self.file2_path, 'w') as file2:
file2.write(cgi_file2 % self.pythonexe)
os.chmod(self.file2_path, 0777)
self.file3_path = os.path.join(self.cgi_child_dir, 'file3.py')
with open(self.file3_path, 'w') as file3:
file3.write(cgi_file1 % self.pythonexe)
os.chmod(self.file3_path, 0777)
self.file4_path = os.path.join(self.cgi_dir, 'file4.py')
with open(self.file4_path, 'w') as file4:
file4.write(cgi_file4 % (self.pythonexe, 'QUERY_STRING'))
os.chmod(self.file4_path, 0o777)
self.cwd = os.getcwd()
os.chdir(self.parent_dir)
def tearDown(self):
try:
os.chdir(self.cwd)
if self.pythonexe != sys.executable:
os.remove(self.pythonexe)
os.remove(self.nocgi_path)
os.remove(self.file1_path)
os.remove(self.file2_path)
os.remove(self.file3_path)
os.remove(self.file4_path)
os.rmdir(self.cgi_child_dir)
os.rmdir(self.cgi_dir)
os.rmdir(self.parent_dir)
finally:
BaseTestCase.tearDown(self)
def test_url_collapse_path(self):
# verify tail is the last portion and head is the rest on proper urls
test_vectors = {
'': '//',
'..': IndexError,
'/.//..': IndexError,
'/': '//',
'//': '//',
'/\\': '//\\',
'/.//': '//',
'cgi-bin/file1.py': '/cgi-bin/file1.py',
'/cgi-bin/file1.py': '/cgi-bin/file1.py',
'a': '//a',
'/a': '//a',
'//a': '//a',
'./a': '//a',
'./C:/': '/C:/',
'/a/b': '/a/b',
'/a/b/': '/a/b/',
'/a/b/.': '/a/b/',
'/a/b/c/..': '/a/b/',
'/a/b/c/../d': '/a/b/d',
'/a/b/c/../d/e/../f': '/a/b/d/f',
'/a/b/c/../d/e/../../f': '/a/b/f',
'/a/b/c/../d/e/.././././..//f': '/a/b/f',
'../a/b/c/../d/e/.././././..//f': IndexError,
'/a/b/c/../d/e/../../../f': '/a/f',
'/a/b/c/../d/e/../../../../f': '//f',
'/a/b/c/../d/e/../../../../../f': IndexError,
'/a/b/c/../d/e/../../../../f/..': '//',
'/a/b/c/../d/e/../../../../f/../.': '//',
}
for path, expected in test_vectors.iteritems():
if isinstance(expected, type) and issubclass(expected, Exception):
self.assertRaises(expected,
CGIHTTPServer._url_collapse_path, path)
else:
actual = CGIHTTPServer._url_collapse_path(path)
self.assertEqual(expected, actual,
msg='path = %r\nGot: %r\nWanted: %r' %
(path, actual, expected))
def test_headers_and_content(self):
res = self.request('/cgi-bin/file1.py')
self.assertEqual(('Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_issue19435(self):
res = self.request('///////////nocgi.py/../cgi-bin/nothere.sh')
self.assertEqual(res.status, 404)
def test_post(self):
params = urllib.urlencode({'spam' : 1, 'eggs' : 'python', 'bacon' : 123456})
headers = {'Content-type' : 'application/x-www-form-urlencoded'}
res = self.request('/cgi-bin/file2.py', 'POST', params, headers)
self.assertEqual(res.read(), '1, python, 123456\n')
def test_invaliduri(self):
res = self.request('/cgi-bin/invalid')
res.read()
self.assertEqual(res.status, 404)
def test_authorization(self):
headers = {'Authorization' : 'Basic %s' %
base64.b64encode('username:pass')}
res = self.request('/cgi-bin/file1.py', 'GET', headers=headers)
self.assertEqual(('Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_no_leading_slash(self):
# http://bugs.python.org/issue2254
res = self.request('cgi-bin/file1.py')
self.assertEqual(('Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_os_environ_is_not_altered(self):
signature = "Test CGI Server"
os.environ['SERVER_SOFTWARE'] = signature
res = self.request('/cgi-bin/file1.py')
self.assertEqual((b'Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
self.assertEqual(os.environ['SERVER_SOFTWARE'], signature)
def test_urlquote_decoding_in_cgi_check(self):
res = self.request('/cgi-bin%2ffile1.py')
self.assertEqual((b'Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_nested_cgi_path_issue21323(self):
res = self.request('/cgi-bin/child-dir/file3.py')
self.assertEqual((b'Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_query_with_multiple_question_mark(self):
res = self.request('/cgi-bin/file4.py?a=b?c=d')
self.assertEqual(
(b'a=b?c=d\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
def test_query_with_continuous_slashes(self):
res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//')
self.assertEqual(
(b'k=aa%2F%2Fbb&//q//p//=//a//b//\n',
'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
class SimpleHTTPRequestHandlerTestCase(unittest.TestCase):
""" Test url parsing """
def setUp(self):
self.translated = os.getcwd()
self.translated = os.path.join(self.translated, 'filename')
self.handler = SocketlessRequestHandler()
def test_query_arguments(self):
path = self.handler.translate_path('/filename')
self.assertEqual(path, self.translated)
path = self.handler.translate_path('/filename?foo=bar')
self.assertEqual(path, self.translated)
path = self.handler.translate_path('/filename?a=b&spam=eggs#zot')
self.assertEqual(path, self.translated)
def test_start_with_double_slash(self):
path = self.handler.translate_path('//filename')
self.assertEqual(path, self.translated)
path = self.handler.translate_path('//filename?foo=bar')
self.assertEqual(path, self.translated)
def test_windows_colon(self):
import SimpleHTTPServer
with test_support.swap_attr(SimpleHTTPServer.os, 'path', ntpath):
path = self.handler.translate_path('c:c:c:foo/filename')
path = path.replace(ntpath.sep, os.sep)
self.assertEqual(path, self.translated)
path = self.handler.translate_path('\\c:../filename')
path = path.replace(ntpath.sep, os.sep)
self.assertEqual(path, self.translated)
path = self.handler.translate_path('c:\\c:..\\foo/filename')
path = path.replace(ntpath.sep, os.sep)
self.assertEqual(path, self.translated)
path = self.handler.translate_path('c:c:foo\\c:c:bar/filename')
path = path.replace(ntpath.sep, os.sep)
self.assertEqual(path, self.translated)
def test_main(verbose=None):
try:
cwd = os.getcwd()
test_support.run_unittest(BaseHTTPRequestHandlerTestCase,
SimpleHTTPRequestHandlerTestCase,
BaseHTTPServerTestCase,
SimpleHTTPServerTestCase,
CGIHTTPServerTestCase
)
finally:
os.chdir(cwd)
if __name__ == '__main__':
test_main()
|
colinnewell/odoo | refs/heads/8.0 | addons/l10n_br/account.py | 340 | # -*- encoding: utf-8 -*-
#################################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#################################################################################
import openerp
from openerp.osv import fields, osv
TAX_CODE_COLUMNS = {
'domain':fields.char('Domain',
help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
    'tax_discount': fields.boolean('Discount this Tax in Price',
                                   help="Check it to deduct this tax from the price (ICMS, PIS, COFINS and other included taxes)."),
}
TAX_DEFAULTS = {
'base_reduction': 0,
'amount_mva': 0,
}
class account_tax_code_template(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax.code.template'
_columns = TAX_CODE_COLUMNS
def generate_tax_code(self, cr, uid, tax_code_root_id, company_id,
context=None):
"""This function generates the tax codes from the templates of tax
code that are children of the given one passed in argument. Then it
        returns a dictionary with the mapping between the templates and the
real objects.
:param tax_code_root_id: id of the root of all the tax code templates
to process.
:param company_id: id of the company the wizard is running for
        :returns: dictionary with the mapping between the templates and the
real objects.
:rtype: dict
"""
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_code = self.pool.get('account.tax.code')
tax_code_template_ref = {}
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
#find all the children of the tax_code_root_id
children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
parent_id = tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False
vals = {
'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
'code': tax_code_template.code,
'info': tax_code_template.info,
'parent_id': parent_id,
'company_id': company_id,
'sign': tax_code_template.sign,
'domain': tax_code_template.domain,
'tax_discount': tax_code_template.tax_discount,
}
#check if this tax code already exists
rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),
('parent_id','=',parent_id),
('code', '=', vals['code']),
('company_id', '=', vals['company_id'])], context=context)
if not rec_list:
#if not yet, create it
new_tax_code = obj_tax_code.create(cr, uid, vals)
#recording the new tax code to do the mapping
tax_code_template_ref[tax_code_template.id] = new_tax_code
return tax_code_template_ref
class account_tax_code(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax.code'
_columns = TAX_CODE_COLUMNS
def get_precision_tax():
def change_digit_tax(cr):
decimal_precision = openerp.registry(cr.dbname)['decimal.precision']
res = decimal_precision.precision_get(cr, 1, 'Account')
return (16, res+2)
return change_digit_tax
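# Note added for clarity (not in the original module): get_precision_tax()
# returns a callable that OpenERP evaluates lazily with a database cursor when
# the float fields below are set up, roughly:
#   digits = get_precision_tax()(cr)   # -> (16, <'Account' precision> + 2)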
class account_tax_template(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax.template'
_columns = {
        'tax_discount': fields.boolean('Discount this Tax in Price',
                                       help="Check it to deduct this tax from the price (ICMS, PIS, etc.)."),
        'base_reduction': fields.float('Reduction', required=True,
                                       digits_compute=get_precision_tax(),
                                       help="A decimal percentage between 0 and 1."),
        'amount_mva': fields.float('MVA Percent', required=True,
                                   digits_compute=get_precision_tax(),
                                   help="A decimal percentage between 0 and 1."),
'type': fields.selection([('percent','Percentage'),
('fixed','Fixed Amount'),
('none','None'),
('code','Python Code'),
('balance','Balance'),
('quantity','Quantity')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
}
_defaults = TAX_DEFAULTS
def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
"""
        This method generates taxes from templates.
:param tax_templates: list of browse record of the tax templates to process
:param tax_code_template_ref: Taxcode templates reference.
:param company_id: id of the company the wizard is running for
:returns:
{
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
result = super(account_tax_template, self)._generate_tax(cr, uid,
tax_templates,
tax_code_template_ref,
company_id,
context)
tax_templates = self.browse(cr, uid, result['tax_template_to_tax'].keys(), context)
obj_acc_tax = self.pool.get('account.tax')
for tax_template in tax_templates:
if tax_template.tax_code_id:
obj_acc_tax.write(cr, uid, result['tax_template_to_tax'][tax_template.id], {'domain': tax_template.tax_code_id.domain,
'tax_discount': tax_template.tax_code_id.tax_discount})
return result
def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
result = {'value': {}}
if not tax_code_id:
return result
obj_tax_code = self.pool.get('account.tax.code.template').browse(cr, uid, tax_code_id)
if obj_tax_code:
result['value']['tax_discount'] = obj_tax_code.tax_discount
result['value']['domain'] = obj_tax_code.domain
return result
class account_tax(osv.osv):
""" Add fields used to define some brazilian taxes """
_inherit = 'account.tax'
_columns = {
        'tax_discount': fields.boolean('Discount this Tax in Price',
                                       help="Check it to deduct this tax from the price (ICMS, PIS, etc.)."),
        'base_reduction': fields.float('Reduction', required=True,
                                       digits_compute=get_precision_tax(),
                                       help="A decimal percentage between 0 and 1."),
        'amount_mva': fields.float('MVA Percent', required=True,
                                   digits_compute=get_precision_tax(),
                                   help="A decimal percentage between 0 and 1."),
'type': fields.selection([('percent','Percentage'),
('fixed','Fixed Amount'),
('none','None'),
('code','Python Code'),
('balance','Balance'),
('quantity','Quantity')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
}
_defaults = TAX_DEFAULTS
def onchange_tax_code_id(self, cr, uid, ids, tax_code_id, context=None):
result = {'value': {}}
if not tax_code_id:
return result
obj_tax_code = self.pool.get('account.tax.code').browse(cr, uid, tax_code_id)
if obj_tax_code:
result['value']['tax_discount'] = obj_tax_code.tax_discount
result['value']['domain'] = obj_tax_code.domain
return result
|
unnikrishnankgs/va | refs/heads/master | venv/lib/python3.5/site-packages/tensorflow/core/framework/op_def_pb2.py | 9 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/op_def.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/op_def.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n&tensorflow/core/framework/op_def.proto\x12\ntensorflow\x1a*tensorflow/core/framework/attr_value.proto\x1a%tensorflow/core/framework/types.proto\"\xb8\x05\n\x05OpDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\tinput_arg\x18\x02 \x03(\x0b\x32\x18.tensorflow.OpDef.ArgDef\x12,\n\noutput_arg\x18\x03 \x03(\x0b\x32\x18.tensorflow.OpDef.ArgDef\x12\'\n\x04\x61ttr\x18\x04 \x03(\x0b\x32\x19.tensorflow.OpDef.AttrDef\x12.\n\x0b\x64\x65precation\x18\x08 \x01(\x0b\x32\x19.tensorflow.OpDeprecation\x12\x0f\n\x07summary\x18\x05 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x06 \x01(\t\x12\x16\n\x0eis_commutative\x18\x12 \x01(\x08\x12\x14\n\x0cis_aggregate\x18\x10 \x01(\x08\x12\x13\n\x0bis_stateful\x18\x11 \x01(\x08\x12\"\n\x1a\x61llows_uninitialized_input\x18\x13 \x01(\x08\x1a\x9f\x01\n\x06\x41rgDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\"\n\x04type\x18\x03 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x11\n\ttype_attr\x18\x04 \x01(\t\x12\x13\n\x0bnumber_attr\x18\x05 \x01(\t\x12\x16\n\x0etype_list_attr\x18\x06 \x01(\t\x12\x0e\n\x06is_ref\x18\x10 \x01(\x08\x1a\xbd\x01\n\x07\x41ttrDef\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12,\n\rdefault_value\x18\x03 \x01(\x0b\x32\x15.tensorflow.AttrValue\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12\x13\n\x0bhas_minimum\x18\x05 \x01(\x08\x12\x0f\n\x07minimum\x18\x06 \x01(\x03\x12-\n\x0e\x61llowed_values\x18\x07 \x01(\x0b\x32\x15.tensorflow.AttrValue\"5\n\rOpDeprecation\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x13\n\x0b\x65xplanation\x18\x02 \x01(\t\"\'\n\x06OpList\x12\x1d\n\x02op\x18\x01 \x03(\x0b\x32\x11.tensorflow.OpDefB,\n\x18org.tensorflow.frameworkB\x0bOpDefProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_OPDEF_ARGDEF = _descriptor.Descriptor(
name='ArgDef',
full_name='tensorflow.OpDef.ArgDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpDef.ArgDef.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='tensorflow.OpDef.ArgDef.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='tensorflow.OpDef.ArgDef.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type_attr', full_name='tensorflow.OpDef.ArgDef.type_attr', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number_attr', full_name='tensorflow.OpDef.ArgDef.number_attr', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type_list_attr', full_name='tensorflow.OpDef.ArgDef.type_list_attr', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_ref', full_name='tensorflow.OpDef.ArgDef.is_ref', index=6,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=483,
serialized_end=642,
)
_OPDEF_ATTRDEF = _descriptor.Descriptor(
name='AttrDef',
full_name='tensorflow.OpDef.AttrDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpDef.AttrDef.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='tensorflow.OpDef.AttrDef.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_value', full_name='tensorflow.OpDef.AttrDef.default_value', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='tensorflow.OpDef.AttrDef.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='has_minimum', full_name='tensorflow.OpDef.AttrDef.has_minimum', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='minimum', full_name='tensorflow.OpDef.AttrDef.minimum', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allowed_values', full_name='tensorflow.OpDef.AttrDef.allowed_values', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=645,
serialized_end=834,
)
_OPDEF = _descriptor.Descriptor(
name='OpDef',
full_name='tensorflow.OpDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.OpDef.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input_arg', full_name='tensorflow.OpDef.input_arg', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_arg', full_name='tensorflow.OpDef.output_arg', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='attr', full_name='tensorflow.OpDef.attr', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deprecation', full_name='tensorflow.OpDef.deprecation', index=4,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='summary', full_name='tensorflow.OpDef.summary', index=5,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='tensorflow.OpDef.description', index=6,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_commutative', full_name='tensorflow.OpDef.is_commutative', index=7,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_aggregate', full_name='tensorflow.OpDef.is_aggregate', index=8,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_stateful', full_name='tensorflow.OpDef.is_stateful', index=9,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allows_uninitialized_input', full_name='tensorflow.OpDef.allows_uninitialized_input', index=10,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_OPDEF_ARGDEF, _OPDEF_ATTRDEF, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=834,
)
_OPDEPRECATION = _descriptor.Descriptor(
name='OpDeprecation',
full_name='tensorflow.OpDeprecation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='tensorflow.OpDeprecation.version', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='explanation', full_name='tensorflow.OpDeprecation.explanation', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=836,
serialized_end=889,
)
_OPLIST = _descriptor.Descriptor(
name='OpList',
full_name='tensorflow.OpList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='op', full_name='tensorflow.OpList.op', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=891,
serialized_end=930,
)
_OPDEF_ARGDEF.fields_by_name['type'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_OPDEF_ARGDEF.containing_type = _OPDEF
_OPDEF_ATTRDEF.fields_by_name['default_value'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_OPDEF_ATTRDEF.fields_by_name['allowed_values'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_OPDEF_ATTRDEF.containing_type = _OPDEF
_OPDEF.fields_by_name['input_arg'].message_type = _OPDEF_ARGDEF
_OPDEF.fields_by_name['output_arg'].message_type = _OPDEF_ARGDEF
_OPDEF.fields_by_name['attr'].message_type = _OPDEF_ATTRDEF
_OPDEF.fields_by_name['deprecation'].message_type = _OPDEPRECATION
_OPLIST.fields_by_name['op'].message_type = _OPDEF
DESCRIPTOR.message_types_by_name['OpDef'] = _OPDEF
DESCRIPTOR.message_types_by_name['OpDeprecation'] = _OPDEPRECATION
DESCRIPTOR.message_types_by_name['OpList'] = _OPLIST
OpDef = _reflection.GeneratedProtocolMessageType('OpDef', (_message.Message,), dict(
ArgDef = _reflection.GeneratedProtocolMessageType('ArgDef', (_message.Message,), dict(
DESCRIPTOR = _OPDEF_ARGDEF,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDef.ArgDef)
))
,
AttrDef = _reflection.GeneratedProtocolMessageType('AttrDef', (_message.Message,), dict(
DESCRIPTOR = _OPDEF_ATTRDEF,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDef.AttrDef)
))
,
DESCRIPTOR = _OPDEF,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDef)
))
_sym_db.RegisterMessage(OpDef)
_sym_db.RegisterMessage(OpDef.ArgDef)
_sym_db.RegisterMessage(OpDef.AttrDef)
OpDeprecation = _reflection.GeneratedProtocolMessageType('OpDeprecation', (_message.Message,), dict(
DESCRIPTOR = _OPDEPRECATION,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpDeprecation)
))
_sym_db.RegisterMessage(OpDeprecation)
OpList = _reflection.GeneratedProtocolMessageType('OpList', (_message.Message,), dict(
DESCRIPTOR = _OPLIST,
__module__ = 'tensorflow.core.framework.op_def_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OpList)
))
_sym_db.RegisterMessage(OpList)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\013OpDefProtosP\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
ar7z1/ansible | refs/heads/devel | test/units/modules/network/aruba/aruba_module.py | 73 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
    except ValueError:
        # the fixture is not valid JSON; keep the raw text
        pass
fixture_data[path] = data
return data
class TestArubaModule(ModuleTestCase):
def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
|
TheNeuralBit/gottengeography | refs/heads/master | tests/test_territories.py | 1 | """Test the classes and functions defined by gg/territories.py"""
from tests import BaseTestCase
class TerritoriesTestCase(BaseTestCase):
filename = 'territories'
def setUp(self):
super().setUp()
def test_countries(self):
"""Ensure we can read country codes."""
self.assertEqual(self.mod.countries['CA'], 'Canada')
self.assertEqual(self.mod.get_country('MT'), 'Malta')
self.assertIsNone(self.mod.get_country('Narnia'))
def test_territories(self):
"""Ensure we can read state/province codes."""
self.assertEqual(self.mod.territories['CA.02'], 'British Columbia')
self.assertEqual(self.mod.get_state('CA', '01'), 'Alberta')
self.assertEqual(self.mod.get_state('US', 'WI'), 'Wisconsin')
self.assertIsNone(self.mod.get_state('US', 'Fakerson'))
def test_zones(self):
"""Ensure we can read timezones."""
self.assertIn('Atlantic', self.mod.zones)
self.assertIn('Pacific', self.mod.zones)
self.assertIn('America', self.mod.tz_regions)
|
jmcarbo/openerp7 | refs/heads/master | openerp/addons/base/ir/ir_config_parameter.py | 72 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Store database-specific configuration parameters
"""
import uuid
import datetime
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import misc, config
"""
A dictionary holding some configuration parameters to be initialized when the database is created.
"""
_default_parameters = {
"database.uuid": lambda: str(uuid.uuid1()),
"database.create_date": lambda: datetime.datetime.now().strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT),
"web.base.url": lambda: "http://localhost:%s" % config.get('xmlrpc_port'),
}
class ir_config_parameter(osv.osv):
"""Per-database storage of configuration key-value pairs."""
_name = 'ir.config_parameter'
_columns = {
'key': fields.char('Key', size=256, required=True, select=1),
'value': fields.text('Value', required=True),
}
_sql_constraints = [
('key_uniq', 'unique (key)', 'Key must be unique.')
]
def init(self, cr, force=False):
"""
Initializes the parameters listed in _default_parameters.
It overrides existing parameters if force is ``True``.
"""
for key, func in _default_parameters.iteritems():
# force=True skips search and always performs the 'if' body (because ids=False)
ids = not force and self.search(cr, SUPERUSER_ID, [('key','=',key)])
if not ids:
self.set_param(cr, SUPERUSER_ID, key, func())
def get_param(self, cr, uid, key, default=False, context=None):
"""Retrieve the value for a given key.
:param string key: The key of the parameter value to retrieve.
:param string default: default value if parameter is missing.
:return: The value of the parameter, or ``default`` if it does not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
if not ids:
return default
param = self.browse(cr, uid, ids[0], context=context)
value = param.value
return value
def set_param(self, cr, uid, key, value, context=None):
"""Sets the value of a parameter.
:param string key: The key of the parameter value to set.
:param string value: The value to set.
:return: the previous value of the parameter or False if it did
not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
if ids:
param = self.browse(cr, uid, ids[0], context=context)
old = param.value
self.write(cr, uid, ids, {'value': value}, context=context)
return old
else:
self.create(cr, uid, {'key': key, 'value': value}, context=context)
return False
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anryko/ansible | refs/heads/devel | lib/ansible/modules/cloud/memset/memset_zone.py | 44 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Simon Weald <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: memset_zone
author: "Simon Weald (@glitchcrab)"
version_added: "2.6"
short_description: Creates and deletes Memset DNS zones.
notes:
- Zones can be thought of as a logical group of domains, all of which share the
same DNS records (i.e. they point to the same IP). An API key generated via the
Memset customer control panel is needed with the following minimum scope -
I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
description:
- Manage DNS zones in a Memset account.
options:
state:
required: true
description:
- Indicates desired state of resource.
choices: [ absent, present ]
api_key:
required: true
description:
- The API key obtained from the Memset control panel.
name:
required: true
description:
- The zone nickname; usually the same as the main domain. Ensure this
value has at most 250 characters.
aliases: [ nickname ]
ttl:
description:
- The default TTL for all records created in the zone. This must be a
valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
force:
required: false
default: false
type: bool
description:
- Forces deletion of a zone and all zone domains/zone records it contains.
'''
EXAMPLES = '''
# Create the zone 'test'
- name: create zone
memset_zone:
name: test
state: present
api_key: 5eb86c9196ab03919abcf03857163741
ttl: 300
delegate_to: localhost
# Force zone deletion
- name: force delete zone
memset_zone:
name: test
state: absent
api_key: 5eb86c9196ab03919abcf03857163741
force: true
delegate_to: localhost
'''
RETURN = '''
memset_api:
description: Zone info from the Memset API
returned: when state == present
type: complex
contains:
domains:
description: List of domains in this zone
returned: always
type: list
sample: []
id:
description: Zone id
returned: always
type: str
sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
nickname:
description: Zone name
returned: always
type: str
sample: "example.com"
records:
description: List of DNS records for domains in this zone
returned: always
type: list
sample: []
ttl:
description: Default TTL for domains in this zone
returned: always
type: int
sample: 300
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.memset import check_zone
from ansible.module_utils.memset import get_zone_id
from ansible.module_utils.memset import memset_api_call
def api_validation(args=None):
'''
Perform some validation which will be enforced by Memset's API (see:
https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
'''
    # the zone name must not exceed 250 characters.
    if len(args['name']) > 250:
        stderr = 'Zone name must be 250 characters or fewer.'
module.fail_json(failed=True, msg=stderr, stderr=stderr)
def check(args=None):
'''
Support for running with check mode.
'''
retvals = dict()
api_method = 'dns.zone_list'
has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
zone_exists, counter = check_zone(data=response, name=args['name'])
# set changed to true if the operation would cause a change.
has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present'))
retvals['changed'] = has_changed
retvals['failed'] = has_failed
return(retvals)
def create_zone(args=None, zone_exists=None, payload=None):
'''
At this point we already know whether the zone exists, so we
just need to make the API reflect the desired state.
'''
has_changed, has_failed = False, False
msg, memset_api = None, None
if not zone_exists:
payload['ttl'] = args['ttl']
payload['nickname'] = args['name']
api_method = 'dns.zone_create'
has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
if not has_failed:
has_changed = True
else:
api_method = 'dns.zone_list'
_has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
for zone in response.json():
if zone['nickname'] == args['name']:
break
if zone['ttl'] != args['ttl']:
# update the zone if the desired TTL is different.
payload['id'] = zone['id']
payload['ttl'] = args['ttl']
api_method = 'dns.zone_update'
has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
if not has_failed:
has_changed = True
# populate return var with zone info.
api_method = 'dns.zone_list'
_has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
if zone_exists:
payload = dict()
payload['id'] = zone_id
api_method = 'dns.zone_info'
_has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
memset_api = response.json()
return(has_failed, has_changed, memset_api, msg)
def delete_zone(args=None, zone_exists=None, payload=None):
'''
Deletion requires extra sanity checking as the zone cannot be
deleted if it contains domains or records. Setting force=true
will override this behaviour.
'''
has_changed, has_failed = False, False
msg, memset_api = None, None
if zone_exists:
api_method = 'dns.zone_list'
_has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
counter = 0
for zone in response.json():
if zone['nickname'] == args['name']:
counter += 1
if counter == 1:
for zone in response.json():
if zone['nickname'] == args['name']:
zone_id = zone['id']
domain_count = len(zone['domains'])
record_count = len(zone['records'])
if (domain_count > 0 or record_count > 0) and args['force'] is False:
# we need to fail out if force was not explicitly set.
stderr = 'Zone contains domains or records and force was not used.'
has_failed = True
has_changed = False
module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1)
api_method = 'dns.zone_delete'
payload['id'] = zone_id
has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
if not has_failed:
has_changed = True
# return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice.
memset_api = msg
msg = None
else:
# zone names are not unique, so we cannot safely delete the requested
# zone at this time.
has_failed = True
has_changed = False
msg = 'Unable to delete zone as multiple zones with the same name exist.'
else:
has_failed, has_changed = False, False
return(has_failed, has_changed, memset_api, msg)
def create_or_delete(args=None):
'''
We need to perform some initial sanity checking and also look
up required info before handing it off to create or delete.
'''
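    # Sketch of the dict this function hands back to main(); the exact keys
    # depend on which values end up populated, e.g. roughly
    #   {'failed': False, 'changed': True, 'memset_api': {...}}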
retvals, payload = dict(), dict()
has_failed, has_changed = False, False
msg, memset_api, stderr = None, None, None
# get the zones and check if the relevant zone exists.
api_method = 'dns.zone_list'
_has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
if _has_failed:
# this is the first time the API is called; incorrect credentials will
# manifest themselves at this point so we need to ensure the user is
# informed of the reason.
retvals['failed'] = _has_failed
retvals['msg'] = _msg
return(retvals)
zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
if args['state'] == 'present':
has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload)
elif args['state'] == 'absent':
has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload)
retvals['failed'] = has_failed
retvals['changed'] = has_changed
for val in ['msg', 'stderr', 'memset_api']:
if val is not None:
retvals[val] = eval(val)
return(retvals)
def main():
global module
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent'], type='str'),
api_key=dict(required=True, type='str', no_log=True),
name=dict(required=True, aliases=['nickname'], type='str'),
ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
force=dict(required=False, default=False, type='bool')
),
supports_check_mode=True
)
# populate the dict with the user-provided vars.
args = dict()
for key, arg in module.params.items():
args[key] = arg
args['check_mode'] = module.check_mode
# validate some API-specific limitations.
api_validation(args=args)
if module.check_mode:
retvals = check(args)
else:
retvals = create_or_delete(args)
if retvals['failed']:
module.fail_json(**retvals)
else:
module.exit_json(**retvals)
if __name__ == '__main__':
main()
|
sergray/energy-meter-mercury206 | refs/heads/master | mercury206/__init__.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Sergey Panfilov'
__email__ = '[email protected]'
__version__ = '0.0.1'
|
therewillbecode/ichnaea | refs/heads/master | ichnaea/config.py | 1 | """
Contains helper functionality for reading and parsing configuration files.
"""
import os
from configparser import (
ConfigParser,
NoOptionError,
NoSectionError,
)
from six import PY2, string_types
class Config(ConfigParser):
"""
A :class:`configparser.ConfigParser` subclass with added
functionality.
"""
def __init__(self, filename):
"""
:param filename: The path to a configuration file.
"""
ConfigParser.__init__(self)
# let's read the file
if isinstance(filename, string_types):
self.filename = filename
self.read(filename)
else: # pragma: no cover
self.filename = None
self.read_file(filename)
def get(self, section, option, default=None):
"""
A get method which returns the default argument when the option
cannot be found instead of raising an exception.
"""
try:
value = ConfigParser.get(self, section, option)
except (NoOptionError, NoSectionError): # pragma: no cover
value = default
return value
def get_map(self, section, default=None):
"""
Return a config section as a dictionary.
"""
try:
value = dict(self.items(section))
except (NoOptionError, NoSectionError): # pragma: no cover
value = default
return value
def optionxform(self, option):
"""
Disable automatic lowercasing of option names.
"""
return option
def asdict(self): # pragma: no cover
"""
Return the entire config as a dict of dicts.
"""
result = {}
for section in self.sections():
result[section] = self.get_map(section)
return result
class DummyConfig(object):
"""
A stub implementation of :class:`ichnaea.config.Config` used in tests.
"""
def __init__(self, settings):
"""
:param settings: A dict of dicts representing the parsed config
settings.
"""
self.settings = settings
def get(self, section, option, default=None):
section_values = self.get_map(section, {})
return section_values.get(option, default)
def get_map(self, section, default=None):
return self.settings.get(section, default)
def sections(self):
return list(self.settings.keys())
def asdict(self):
result = {}
for section in self.sections():
result[section] = self.get_map(section)
return result
def read_config(filename=None, envvar='ICHNAEA_CFG', fallback='location.ini'):
"""
Read a configuration file from three possible locations:
1. from the passed in filename,
2. from the environment variable passed as `envvar`
3. from the `fallback` file in the current working directory.
:rtype: :class:`ichnaea.config.Config`
"""
if filename is None:
filename = os.environ.get(envvar, fallback)
if PY2: # pragma: no cover
filename = filename.decode('utf-8')
return Config(filename)
|
shanemcd/ansible | refs/heads/devel | lib/ansible/modules/network/avi/avi_vrfcontext.py | 27 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.2
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vrfcontext
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of VrfContext Avi RESTful Object
description:
- This module is used to configure VrfContext object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
bgp_profile:
description:
- Bgp local and peer info.
cloud_ref:
description:
- It is a reference to an object of type cloud.
debugvrfcontext:
description:
- Configure debug flags for vrf.
- Field introduced in 17.1.1.
description:
description:
- User defined description for the object.
gateway_mon:
description:
- Configure ping based heartbeat check for gateway in service engines of vrf.
internal_gateway_monitor:
description:
- Configure ping based heartbeat check for all default gateways in service engines of vrf.
- Field introduced in 17.1.1.
name:
description:
- Name of the object.
required: true
static_routes:
description:
- List of staticroute.
system_default:
description:
- Boolean flag to set system_default.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VrfContext object
avi_vrfcontext:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vrfcontext
"""
RETURN = '''
obj:
description: VrfContext (api/vrfcontext) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
bgp_profile=dict(type='dict',),
cloud_ref=dict(type='str',),
debugvrfcontext=dict(type='dict',),
description=dict(type='str',),
gateway_mon=dict(type='list',),
internal_gateway_monitor=dict(type='dict',),
name=dict(type='str', required=True),
static_routes=dict(type='list',),
system_default=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'vrfcontext',
set([]))
if __name__ == '__main__':
main()
|
2014c2g19/2014c2g19 | refs/heads/master | exts/w2/static/Brython2.0.0-20140209-164925/Lib/_functools.py | 727 | def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
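# Example added for illustration (mirrors functools.partial semantics):
#   inc = partial(lambda a, b: a + b, 1)
#   inc(41)      # -> 42
#   inc.args     # -> (1,)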
def reduce(func,iterable,initializer=None):
args = iter(iterable)
if initializer is not None:
res = initializer
else:
res = next(args)
while True:
try:
res = func(res,next(args))
except StopIteration:
return res
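# Examples added for illustration (mirror functools.reduce semantics):
#   reduce(lambda acc, x: acc + x, [1, 2, 3, 4])   # -> 10
#   reduce(lambda acc, x: acc + x, [], 0)          # -> 0 (initializer only)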
|
balloob/home-assistant | refs/heads/dev | homeassistant/components/heos/const.py | 28 | """Const for the HEOS integration."""
ATTR_PASSWORD = "password"
ATTR_USERNAME = "username"
COMMAND_RETRY_ATTEMPTS = 2
COMMAND_RETRY_DELAY = 1
DATA_CONTROLLER_MANAGER = "controller"
DATA_SOURCE_MANAGER = "source_manager"
DATA_DISCOVERED_HOSTS = "heos_discovered_hosts"
DOMAIN = "heos"
SERVICE_SIGN_IN = "sign_in"
SERVICE_SIGN_OUT = "sign_out"
SIGNAL_HEOS_UPDATED = "heos_updated"
|
tmerrick1/spack | refs/heads/develop | var/spack/repos/builtin.mock/packages/archive-files/package.py | 4 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class ArchiveFiles(AutotoolsPackage):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
version('2.0', '2.0_a_hash')
@property
def archive_files(self):
return super(ArchiveFiles, self).archive_files + ['../../outside.log']
def autoreconf(self, spec, prefix):
pass
def configure(self, spec, prefix):
pass
def build(self, spec, prefix):
mkdirp(self.build_directory)
config_log = join_path(self.build_directory, 'config.log')
touch(config_log)
def install(self, spec, prefix):
touch(join_path(prefix, 'deleteme'))
|
sagemathinc/smc | refs/heads/master | src/smc_sagews/smc_sagews/tests/a.py | 5 | def f2(*args, **kwargs):
print("test f2 1")
|
dct2012/chromeos-3.14 | refs/heads/chromeos-3.14 | tools/perf/tests/attr.py | 3174 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
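    # Illustrative behaviour of compare_data (editor's note, not upstream code):
    #   compare_data('0|1', '1')  -> True   (one of the '|' alternatives matches)
    #   compare_data('*', '1234') -> True   ('*' acts as a wildcard on either side)
    #   compare_data('0', '1')    -> False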
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# A test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
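# Example of such a description file (editor's illustration; the command,
# arguments and event values below are hypothetical, not an actual test from
# the suite):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000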
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the 'event' word,
        # optionally followed by ':' allowing to load a 'parent
        # event' first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
liu602348184/django | refs/heads/master | tests/aggregation_regress/tests.py | 66 | from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Q, Avg, Count, Max, StdDev, Sum, Value, Variance,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, SelfRefFK, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = HardbackBook.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15), weight=4.5)
cls.b6 = HardbackBook.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15), weight=3.7)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
    def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))),
1
)
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))),
4
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
        # Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(
num_authors=Count('authors')).values().get(isbn='013790395')
self.assertEqual(obj, {
'contact_id': self.a8.id,
'id': self.b5.id,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': self.p3.id,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(name="Jonno's House of Books").annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.b1.id, 'id__count': 2},
{'pub': self.b2.id, 'id__count': 1},
{'pub': self.b3.id, 'id__count': 2},
{'pub': self.b4.id, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
# Check that there is just one GROUP BY clause (zero commas means at
# most one clause)
self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in values(), so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
# Regression for #10766 - Shouldn't be able to reference an aggregate
# fields in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# Test that when a field occurs on the LHS of a HAVING clause that it
# appears correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = Book.objects.values_list("publisher__name").annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
).order_by("-publisher__name")
self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
).order_by("num_awards")
self.assertQuerysetEqual(
qs, [
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann"
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The explicitly provided annotation name in this case
        # poses no problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
def test_annotate_joins(self):
"""
Test that the base table's join isn't promoted to LOUTER. This could
cause the query generation to fail if there is an exclude() for fk-field
in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn('id', group_by[0][0])
self.assertNotIn('name', group_by[0][0])
self.assertNotIn('age', group_by[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('age', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('contact', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
Regression test for #10870: Aggregates with joins ignore extra
filters provided by setup_joins
tests aggregations with generic reverse relations
"""
django_book = Book.objects.get(name='Practical Django Projects')
ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(django_book))
ItemTag.objects.create(object_id=django_book.id, tag='django',
content_type=ContentType.objects.get_for_model(django_book))
        # Assign a tag to a model with the same PK as the book above. If the JOIN
        # used in aggregation doesn't have the content type as part of the
        # condition, the annotation will also count the 'hi mom' tag for django_book.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
content_type=ContentType.objects.get_for_model(wmpk))
ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(ai_book))
self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
('Practical Django Projects', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Artificial Intelligence: A Modern Approach', 0),
('Python Web Development with Django', 0),
('Sams Teach Yourself Django in 24 Hours', 0),
('The Definitive Guide to Django: Web Development Done Right', 0)
]
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2) | Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Test that aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [1, 4, 5, 6],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Check that splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
Check that an F() object referring to related column works correctly
in group by.
"""
qs = Book.objects.annotate(
acount=Count('authors')
).filter(
acount=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
self.assertEqual(vals, {
'select__sum': 10,
'select__avg': Approximate(1.666, places=2),
})
def test_annotate_on_relation(self):
book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 30)
def test_annotate_distinct_aggregate(self):
# There are three books with rating of 4.0 and two of the books have
# the same price. Hence, the distinct removes one rating of 4.0
# from the results.
vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
# Also, the existing join is unpromoted when doing filtering for already
# promoted join.
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable, the first use by annotate will be LOUTER
qs = Charlie.objects.annotate(Count('alfa__name'))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count('contact__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
t1 = SelfRefFK.objects.create(name='t1')
SelfRefFK.objects.create(name='t2', parent=t1)
SelfRefFK.objects.create(name='t3', parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
[('t1', 2), ('t2', 0), ('t3', 0)],
lambda x: (x.name, x.num_children)
)
|
firebitsbr/raspberry_pwn | refs/heads/master | src/pentest/sqlmap/lib/core/bigarray.py | 5 | #!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
import cPickle as pickle
except:
import pickle
import os
import tempfile
from lib.core.settings import BIGARRAY_CHUNK_LENGTH
class Cache(object):
"""
Auxiliary class used for storing cached chunks
"""
def __init__(self, index, data, dirty):
self.index = index
self.data = data
self.dirty = dirty
class BigArray(list):
"""
List-like class used for storing large amounts of data (disk cached)
"""
def __init__(self):
self.chunks = [[]]
self.cache = None
self.length = 0
self.filenames = set()
def append(self, value):
self.chunks[-1].append(value)
if len(self.chunks[-1]) >= BIGARRAY_CHUNK_LENGTH:
filename = self._dump(self.chunks[-1])
del(self.chunks[-1][:])
self.chunks[-1] = filename
self.chunks.append([])
def extend(self, value):
for _ in value:
self.append(_)
def pop(self):
if len(self.chunks[-1]) < 1:
self.chunks.pop()
with open(self.chunks[-1], "rb") as fp:
self.chunks[-1] = pickle.load(fp)
return self.chunks[-1].pop()
def index(self, value):
for index in xrange(len(self)):
if self[index] == value:
return index
        raise ValueError("%s is not in list" % value)
def _dump(self, value):
handle, filename = tempfile.mkstemp(prefix="sqlmapba-")
self.filenames.add(filename)
os.close(handle)
with open(filename, "w+b") as fp:
pickle.dump(value, fp, pickle.HIGHEST_PROTOCOL)
return filename
def _checkcache(self, index):
if (self.cache and self.cache.index != index and self.cache.dirty):
filename = self._dump(self.cache.data)
self.chunks[self.cache.index] = filename
if not (self.cache and self.cache.index == index):
with open(self.chunks[index], "rb") as fp:
self.cache = Cache(index, pickle.load(fp), False)
def __getslice__(self, i, j):
retval = BigArray()
i = max(0, len(self) + i if i < 0 else i)
j = min(len(self), len(self) + j if j < 0 else j)
for _ in xrange(i, j):
retval.append(self[_])
return retval
def __getitem__(self, y):
if y < 0:
y += len(self)
index = y / BIGARRAY_CHUNK_LENGTH
offset = y % BIGARRAY_CHUNK_LENGTH
chunk = self.chunks[index]
if isinstance(chunk, list):
return chunk[offset]
else:
self._checkcache(index)
return self.cache.data[offset]
def __setitem__(self, y, value):
index = y / BIGARRAY_CHUNK_LENGTH
offset = y % BIGARRAY_CHUNK_LENGTH
chunk = self.chunks[index]
if isinstance(chunk, list):
chunk[offset] = value
else:
self._checkcache(index)
self.cache.data[offset] = value
self.cache.dirty = True
def __repr__(self):
return "%s%s" % ("..." if len(self.chunks) > 1 else "", self.chunks[-1].__repr__())
def __iter__(self):
for i in xrange(len(self)):
yield self[i]
def __len__(self):
return len(self.chunks[-1]) if len(self.chunks) == 1 else (len(self.chunks) - 1) * BIGARRAY_CHUNK_LENGTH + len(self.chunks[-1])
def __del__(self):
for filename in self.filenames:
try:
os.remove(filename)
except:
pass
|
googleapis/googleapis-gen | refs/heads/master | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/external_conversion_source.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'ExternalConversionSourceEnum',
},
)
class ExternalConversionSourceEnum(proto.Message):
r"""Container for enum describing the external conversion source
that is associated with a ConversionAction.
"""
class ExternalConversionSource(proto.Enum):
r"""The external conversion source that is associated with a
ConversionAction.
"""
UNSPECIFIED = 0
UNKNOWN = 1
WEBPAGE = 2
ANALYTICS = 3
UPLOAD = 4
AD_CALL_METRICS = 5
WEBSITE_CALL_METRICS = 6
STORE_VISITS = 7
ANDROID_IN_APP = 8
IOS_IN_APP = 9
IOS_FIRST_OPEN = 10
APP_UNSPECIFIED = 11
ANDROID_FIRST_OPEN = 12
UPLOAD_CALLS = 13
FIREBASE = 14
CLICK_TO_CALL = 15
SALESFORCE = 16
STORE_SALES_CRM = 17
STORE_SALES_PAYMENT_NETWORK = 18
GOOGLE_PLAY = 19
THIRD_PARTY_APP_ANALYTICS = 20
GOOGLE_ATTRIBUTION = 21
STORE_SALES_DIRECT_UPLOAD = 23
STORE_SALES = 24
SEARCH_ADS_360 = 25
GOOGLE_HOSTED = 27
FLOODLIGHT = 29
ANALYTICS_SEARCH_ADS_360 = 31
FIREBASE_SEARCH_ADS_360 = 33
__all__ = tuple(sorted(__protobuf__.manifest))
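# Editor's usage sketch (not part of the generated file; the import path is an
# assumption based on this file's location in the package):
#
#   from google.ads.googleads.v8.enums.types import external_conversion_source
#   enum = external_conversion_source.ExternalConversionSourceEnum.ExternalConversionSource
#   assert enum.WEBPAGE == 2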
|
rcos/Observatory | refs/heads/master | observatory/lib/dulwich/repo.py | 2 | # repo.py -- For dealing with git repositories.
# Copyright (C) 2007 James Westby <[email protected]>
# Copyright (C) 2008-2009 Jelmer Vernooij <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Repository access."""
from cStringIO import StringIO
import errno
import os
from lib.dulwich.errors import (
MissingCommitError,
NoIndexPresent,
NotBlobError,
NotCommitError,
NotGitRepository,
NotTreeError,
NotTagError,
PackedRefsException,
CommitError,
RefFormatError,
)
from lib.dulwich.file import (
ensure_dir_exists,
GitFile,
)
from lib.dulwich.object_store import (
DiskObjectStore,
MemoryObjectStore,
)
from lib.dulwich.objects import (
Blob,
Commit,
ShaFile,
Tag,
Tree,
hex_to_sha,
)
import warnings
OBJECTDIR = 'objects'
SYMREF = 'ref: '
REFSDIR = 'refs'
REFSDIR_TAGS = 'tags'
REFSDIR_HEADS = 'heads'
INDEX_FILENAME = "index"
BASE_DIRECTORIES = [
["branches"],
[REFSDIR],
[REFSDIR, REFSDIR_TAGS],
[REFSDIR, REFSDIR_HEADS],
["hooks"],
["info"]
]
def read_info_refs(f):
ret = {}
for l in f.readlines():
(sha, name) = l.rstrip("\r\n").split("\t", 1)
ret[name] = sha
return ret
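# Editor's illustration: read_info_refs expects lines of the form
# "<sha>\t<refname>", e.g. a file containing
#   "aa0c9f...\trefs/heads/master\n"
# yields {"refs/heads/master": "aa0c9f..."}.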
def check_ref_format(refname):
"""Check if a refname is correctly formatted.
Implements all the same rules as git-check-ref-format[1].
[1] http://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html
:param refname: The refname to check
:return: True if refname is valid, False otherwise
"""
# These could be combined into one big expression, but are listed separately
# to parallel [1].
if '/.' in refname or refname.startswith('.'):
return False
if '/' not in refname:
return False
if '..' in refname:
return False
for c in refname:
if ord(c) < 040 or c in '\177 ~^:?*[':
return False
if refname[-1] in '/.':
return False
if refname.endswith('.lock'):
return False
if '@{' in refname:
return False
if '\\' in refname:
return False
return True
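# Editor's illustration of the rules implemented above (not upstream code):
#   check_ref_format('refs/heads/master')   -> True
#   check_ref_format('HEAD')                -> False  (no '/' component)
#   check_ref_format('refs/heads/foo.lock') -> False  ('.lock' suffix)
#   check_ref_format('refs/heads/..foo')    -> False  ('..' not allowed)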
class RefsContainer(object):
"""A container for refs."""
def set_ref(self, name, other):
warnings.warn("RefsContainer.set_ref() is deprecated."
"Use set_symblic_ref instead.",
category=DeprecationWarning, stacklevel=2)
return self.set_symbolic_ref(name, other)
def set_symbolic_ref(self, name, other):
"""Make a ref point at another ref.
:param name: Name of the ref to set
:param other: Name of the ref to point at
"""
raise NotImplementedError(self.set_symbolic_ref)
def get_packed_refs(self):
"""Get contents of the packed-refs file.
:return: Dictionary mapping ref names to SHA1s
:note: Will return an empty dictionary when no packed-refs file is
present.
"""
raise NotImplementedError(self.get_packed_refs)
def get_peeled(self, name):
"""Return the cached peeled value of a ref, if available.
:param name: Name of the ref to peel
:return: The peeled value of the ref. If the ref is known not point to a
tag, this will be the SHA the ref refers to. If the ref may point to
a tag, but no cached information is available, None is returned.
"""
return None
def import_refs(self, base, other):
for name, value in other.iteritems():
self["%s/%s" % (base, name)] = value
def allkeys(self):
"""All refs present in this container."""
raise NotImplementedError(self.allkeys)
def keys(self, base=None):
"""Refs present in this container.
:param base: An optional base to return refs under.
:return: An unsorted set of valid refs in this container, including
packed refs.
"""
if base is not None:
return self.subkeys(base)
else:
return self.allkeys()
def subkeys(self, base):
"""Refs present in this container under a base.
:param base: The base to return refs under.
:return: A set of valid refs in this container under the base; the base
prefix is stripped from the ref names returned.
"""
keys = set()
base_len = len(base) + 1
for refname in self.allkeys():
if refname.startswith(base):
keys.add(refname[base_len:])
return keys
def as_dict(self, base=None):
"""Return the contents of this container as a dictionary.
"""
ret = {}
keys = self.keys(base)
if base is None:
base = ""
for key in keys:
try:
ret[key] = self[("%s/%s" % (base, key)).strip("/")]
except KeyError:
continue # Unable to resolve
return ret
def _check_refname(self, name):
"""Ensure a refname is valid and lives in refs or is HEAD.
HEAD is not a valid refname according to git-check-ref-format, but this
        class needs to be able to touch HEAD. Also, check_ref_format expects
        refnames without the leading 'refs/', but this class requires refnames
        to start with 'refs/' so that it cannot touch anything outside the refs
        dir (or HEAD).
:param name: The name of the reference.
:raises KeyError: if a refname is not HEAD or is otherwise not valid.
"""
if name == 'HEAD':
return
if not name.startswith('refs/') or not check_ref_format(name[5:]):
raise RefFormatError(name)
def read_ref(self, refname):
"""Read a reference without following any references.
:param refname: The name of the reference
:return: The contents of the ref file, or None if it does
not exist.
"""
contents = self.read_loose_ref(refname)
if not contents:
contents = self.get_packed_refs().get(refname, None)
return contents
def read_loose_ref(self, name):
"""Read a loose reference and return its contents.
:param name: the refname to read
:return: The contents of the ref file, or None if it does
not exist.
"""
raise NotImplementedError(self.read_loose_ref)
def _follow(self, name):
"""Follow a reference name.
:return: a tuple of (refname, sha), where refname is the name of the
last reference in the symbolic reference chain
"""
self._check_refname(name)
contents = SYMREF + name
depth = 0
while contents.startswith(SYMREF):
refname = contents[len(SYMREF):]
contents = self.read_ref(refname)
if not contents:
break
depth += 1
if depth > 5:
raise KeyError(name)
return refname, contents
def __contains__(self, refname):
if self.read_ref(refname):
return True
return False
def __getitem__(self, name):
"""Get the SHA1 for a reference name.
This method follows all symbolic references.
"""
_, sha = self._follow(name)
if sha is None:
raise KeyError(name)
return sha
def set_if_equals(self, name, old_ref, new_ref):
"""Set a refname to new_ref only if it currently equals old_ref.
This method follows all symbolic references if applicable for the
subclass, and can be used to perform an atomic compare-and-swap
operation.
:param name: The refname to set.
:param old_ref: The old sha the refname must refer to, or None to set
unconditionally.
:param new_ref: The new sha the refname will refer to.
:return: True if the set was successful, False otherwise.
"""
raise NotImplementedError(self.set_if_equals)
def add_if_new(self, name, ref):
"""Add a new reference only if it does not already exist."""
raise NotImplementedError(self.add_if_new)
def __setitem__(self, name, ref):
"""Set a reference name to point to the given SHA1.
This method follows all symbolic references if applicable for the
subclass.
:note: This method unconditionally overwrites the contents of a
reference. To update atomically only if the reference has not
changed, use set_if_equals().
:param name: The refname to set.
:param ref: The new sha the refname will refer to.
"""
self.set_if_equals(name, None, ref)
def remove_if_equals(self, name, old_ref):
"""Remove a refname only if it currently equals old_ref.
This method does not follow symbolic references, even if applicable for
the subclass. It can be used to perform an atomic compare-and-delete
operation.
:param name: The refname to delete.
:param old_ref: The old sha the refname must refer to, or None to delete
unconditionally.
:return: True if the delete was successful, False otherwise.
"""
raise NotImplementedError(self.remove_if_equals)
def __delitem__(self, name):
"""Remove a refname.
This method does not follow symbolic references, even if applicable for
the subclass.
:note: This method unconditionally deletes the contents of a reference.
To delete atomically only if the reference has not changed, use
remove_if_equals().
:param name: The refname to delete.
"""
self.remove_if_equals(name, None)
class DictRefsContainer(RefsContainer):
"""RefsContainer backed by a simple dict.
This container does not support symbolic or packed references and is not
threadsafe.
"""
def __init__(self, refs):
self._refs = refs
self._peeled = {}
def allkeys(self):
return self._refs.keys()
def read_loose_ref(self, name):
return self._refs.get(name, None)
def get_packed_refs(self):
return {}
def set_symbolic_ref(self, name, other):
self._refs[name] = SYMREF + other
def set_if_equals(self, name, old_ref, new_ref):
if old_ref is not None and self._refs.get(name, None) != old_ref:
return False
realname, _ = self._follow(name)
self._refs[realname] = new_ref
return True
def add_if_new(self, name, ref):
if name in self._refs:
return False
self._refs[name] = ref
return True
def remove_if_equals(self, name, old_ref):
if old_ref is not None and self._refs.get(name, None) != old_ref:
return False
del self._refs[name]
return True
def get_peeled(self, name):
return self._peeled.get(name)
def _update(self, refs):
"""Update multiple refs; intended only for testing."""
# TODO(dborowitz): replace this with a public function that uses
# set_if_equal.
self._refs.update(refs)
def _update_peeled(self, peeled):
"""Update cached peeled refs; intended only for testing."""
self._peeled.update(peeled)
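# Illustrative sketch (added for exposition, not part of the original module):
# how DictRefsContainer resolves a symbolic HEAD through the inherited
# __getitem__()/_follow() machinery. The SHA below is a made-up placeholder.
def _dict_refs_container_example():
    """Resolve HEAD through a symbolic ref in an in-memory container."""
    refs = DictRefsContainer({
        'HEAD': SYMREF + 'refs/heads/master',
        'refs/heads/master': 'a' * 40,
    })
    # Follows the symref chain HEAD -> refs/heads/master -> 'aaaa...a'
    return refs['HEAD']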
class DiskRefsContainer(RefsContainer):
"""Refs container that reads refs from disk."""
def __init__(self, path):
self.path = path
self._packed_refs = None
self._peeled_refs = None
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.path)
def subkeys(self, base):
keys = set()
path = self.refpath(base)
for root, dirs, files in os.walk(path):
dir = root[len(path):].strip(os.path.sep).replace(os.path.sep, "/")
for filename in files:
refname = ("%s/%s" % (dir, filename)).strip("/")
# check_ref_format requires at least one /, so we prepend the
# base before calling it.
if check_ref_format("%s/%s" % (base, refname)):
keys.add(refname)
for key in self.get_packed_refs():
if key.startswith(base):
keys.add(key[len(base):].strip("/"))
return keys
def allkeys(self):
keys = set()
if os.path.exists(self.refpath("HEAD")):
keys.add("HEAD")
path = self.refpath("")
for root, dirs, files in os.walk(self.refpath("refs")):
dir = root[len(path):].strip(os.path.sep).replace(os.path.sep, "/")
for filename in files:
refname = ("%s/%s" % (dir, filename)).strip("/")
if check_ref_format(refname):
keys.add(refname)
keys.update(self.get_packed_refs())
return keys
def refpath(self, name):
"""Return the disk path of a ref.
"""
if os.path.sep != "/":
name = name.replace("/", os.path.sep)
return os.path.join(self.path, name)
def get_packed_refs(self):
"""Get contents of the packed-refs file.
:return: Dictionary mapping ref names to SHA1s
:note: Will return an empty dictionary when no packed-refs file is
present.
"""
# TODO: invalidate the cache on repacking
if self._packed_refs is None:
# set both to empty because we want _peeled_refs to be
# None if and only if _packed_refs is also None.
self._packed_refs = {}
self._peeled_refs = {}
path = os.path.join(self.path, 'packed-refs')
try:
f = GitFile(path, 'rb')
except IOError, e:
if e.errno == errno.ENOENT:
return {}
raise
try:
first_line = iter(f).next().rstrip()
if (first_line.startswith("# pack-refs") and " peeled" in
first_line):
for sha, name, peeled in read_packed_refs_with_peeled(f):
self._packed_refs[name] = sha
if peeled:
self._peeled_refs[name] = peeled
else:
f.seek(0)
for sha, name in read_packed_refs(f):
self._packed_refs[name] = sha
finally:
f.close()
return self._packed_refs
def get_peeled(self, name):
"""Return the cached peeled value of a ref, if available.
:param name: Name of the ref to peel
        :return: The peeled value of the ref. If the ref is known not to point to a
tag, this will be the SHA the ref refers to. If the ref may point to
a tag, but no cached information is available, None is returned.
"""
self.get_packed_refs()
if self._peeled_refs is None or name not in self._packed_refs:
# No cache: no peeled refs were read, or this ref is loose
return None
if name in self._peeled_refs:
return self._peeled_refs[name]
else:
# Known not peelable
return self[name]
def read_loose_ref(self, name):
"""Read a reference file and return its contents.
        If the reference file is a symbolic reference, only read the first line of
the file. Otherwise, only read the first 40 bytes.
:param name: the refname to read, relative to refpath
:return: The contents of the ref file, or None if the file does not
exist.
:raises IOError: if any other error occurs
"""
filename = self.refpath(name)
try:
f = GitFile(filename, 'rb')
try:
header = f.read(len(SYMREF))
if header == SYMREF:
# Read only the first line
return header + iter(f).next().rstrip("\r\n")
else:
# Read only the first 40 bytes
return header + f.read(40-len(SYMREF))
finally:
f.close()
except IOError, e:
if e.errno == errno.ENOENT:
return None
raise
def _remove_packed_ref(self, name):
if self._packed_refs is None:
return
filename = os.path.join(self.path, 'packed-refs')
# reread cached refs from disk, while holding the lock
f = GitFile(filename, 'wb')
try:
self._packed_refs = None
self.get_packed_refs()
if name not in self._packed_refs:
return
del self._packed_refs[name]
if name in self._peeled_refs:
del self._peeled_refs[name]
write_packed_refs(f, self._packed_refs, self._peeled_refs)
f.close()
finally:
f.abort()
def set_symbolic_ref(self, name, other):
"""Make a ref point at another ref.
:param name: Name of the ref to set
:param other: Name of the ref to point at
"""
self._check_refname(name)
self._check_refname(other)
filename = self.refpath(name)
try:
f = GitFile(filename, 'wb')
try:
f.write(SYMREF + other + '\n')
except (IOError, OSError):
f.abort()
raise
finally:
f.close()
def set_if_equals(self, name, old_ref, new_ref):
"""Set a refname to new_ref only if it currently equals old_ref.
This method follows all symbolic references, and can be used to perform
an atomic compare-and-swap operation.
:param name: The refname to set.
:param old_ref: The old sha the refname must refer to, or None to set
unconditionally.
:param new_ref: The new sha the refname will refer to.
:return: True if the set was successful, False otherwise.
"""
try:
realname, _ = self._follow(name)
except KeyError:
realname = name
filename = self.refpath(realname)
ensure_dir_exists(os.path.dirname(filename))
f = GitFile(filename, 'wb')
try:
if old_ref is not None:
try:
# read again while holding the lock
orig_ref = self.read_loose_ref(realname)
if orig_ref is None:
orig_ref = self.get_packed_refs().get(realname, None)
if orig_ref != old_ref:
f.abort()
return False
except (OSError, IOError):
f.abort()
raise
try:
f.write(new_ref+"\n")
except (OSError, IOError):
f.abort()
raise
finally:
f.close()
return True
def add_if_new(self, name, ref):
"""Add a new reference only if it does not already exist.
This method follows symrefs, and only ensures that the last ref in the
chain does not exist.
:param name: The refname to set.
:param ref: The new sha the refname will refer to.
:return: True if the add was successful, False otherwise.
"""
try:
realname, contents = self._follow(name)
if contents is not None:
return False
except KeyError:
realname = name
self._check_refname(realname)
filename = self.refpath(realname)
ensure_dir_exists(os.path.dirname(filename))
f = GitFile(filename, 'wb')
try:
if os.path.exists(filename) or name in self.get_packed_refs():
f.abort()
return False
try:
f.write(ref+"\n")
except (OSError, IOError):
f.abort()
raise
finally:
f.close()
return True
def remove_if_equals(self, name, old_ref):
"""Remove a refname only if it currently equals old_ref.
This method does not follow symbolic references. It can be used to
perform an atomic compare-and-delete operation.
:param name: The refname to delete.
:param old_ref: The old sha the refname must refer to, or None to delete
unconditionally.
:return: True if the delete was successful, False otherwise.
"""
self._check_refname(name)
filename = self.refpath(name)
ensure_dir_exists(os.path.dirname(filename))
f = GitFile(filename, 'wb')
try:
if old_ref is not None:
orig_ref = self.read_loose_ref(name)
if orig_ref is None:
orig_ref = self.get_packed_refs().get(name, None)
if orig_ref != old_ref:
return False
# may only be packed
try:
os.remove(filename)
except OSError, e:
if e.errno != errno.ENOENT:
raise
self._remove_packed_ref(name)
finally:
# never write, we just wanted the lock
f.abort()
return True
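# Illustrative sketch (added for exposition): the compare-and-swap pattern that
# set_if_equals() is designed for. The ref name and SHA parameters are
# placeholders supplied by the caller.
def _compare_and_swap_example(refs, old_sha, new_sha):
    """Advance refs/heads/master only if it still equals old_sha."""
    if refs.set_if_equals('refs/heads/master', old_sha, new_sha):
        return 'updated'
    return 'ref changed underneath us; update refused'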
def _split_ref_line(line):
"""Split a single ref line into a tuple of SHA1 and name."""
fields = line.rstrip("\n").split(" ")
if len(fields) != 2:
raise PackedRefsException("invalid ref line '%s'" % line)
sha, name = fields
try:
hex_to_sha(sha)
except (AssertionError, TypeError), e:
raise PackedRefsException(e)
if not check_ref_format(name):
raise PackedRefsException("invalid ref name '%s'" % name)
return (sha, name)
def read_packed_refs(f):
"""Read a packed refs file.
:param f: file-like object to read from
:return: Iterator over tuples with SHA1s and ref names.
"""
for l in f:
if l[0] == "#":
# Comment
continue
if l[0] == "^":
raise PackedRefsException(
"found peeled ref in packed-refs without peeled")
yield _split_ref_line(l)
def read_packed_refs_with_peeled(f):
"""Read a packed refs file including peeled refs.
Assumes the "# pack-refs with: peeled" line was already read. Yields tuples
with ref names, SHA1s, and peeled SHA1s (or None).
:param f: file-like object to read from, seek'ed to the second line
"""
last = None
for l in f:
if l[0] == "#":
continue
l = l.rstrip("\r\n")
if l[0] == "^":
if not last:
raise PackedRefsException("unexpected peeled ref line")
try:
hex_to_sha(l[1:])
except (AssertionError, TypeError), e:
raise PackedRefsException(e)
sha, name = _split_ref_line(last)
last = None
yield (sha, name, l[1:])
else:
if last:
sha, name = _split_ref_line(last)
yield (sha, name, None)
last = l
if last:
sha, name = _split_ref_line(last)
yield (sha, name, None)
def write_packed_refs(f, packed_refs, peeled_refs=None):
"""Write a packed refs file.
:param f: empty file-like object to write to
:param packed_refs: dict of refname to sha of packed refs to write
:param peeled_refs: dict of refname to peeled value of sha
"""
if peeled_refs is None:
peeled_refs = {}
else:
f.write('# pack-refs with: peeled\n')
for refname in sorted(packed_refs.iterkeys()):
f.write('%s %s\n' % (packed_refs[refname], refname))
if refname in peeled_refs:
f.write('^%s\n' % peeled_refs[refname])
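# Illustrative sketch (added for exposition, not part of the original module):
# the text write_packed_refs() emits for a small refs dict. The 40-character
# hex strings below are placeholders, not real object SHAs.
def _write_packed_refs_example():
    """Return the packed-refs text for a two-entry refs dict."""
    buf = StringIO()
    packed = {
        'refs/heads/master': '1' * 40,
        'refs/tags/v1.0': '2' * 40,
    }
    peeled = {'refs/tags/v1.0': '3' * 40}
    write_packed_refs(buf, packed, peeled)
    # Output: the '# pack-refs with: peeled' header, one '<sha> <refname>' line
    # per ref sorted by refname, and a '^<peeled sha>' line directly after any
    # ref that has a cached peeled value.
    return buf.getvalue()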
class BaseRepo(object):
"""Base class for a git repository.
:ivar object_store: Dictionary-like object for accessing
the objects
:ivar refs: Dictionary-like object with the refs in this repository
"""
def __init__(self, object_store, refs):
self.object_store = object_store
self.refs = refs
def _init_files(self, bare):
"""Initialize a default set of named files."""
self._put_named_file('description', "Unnamed repository")
self._put_named_file('config', ('[core]\n'
'repositoryformatversion = 0\n'
'filemode = true\n'
'bare = ' + str(bare).lower() + '\n'
'logallrefupdates = true\n'))
self._put_named_file(os.path.join('info', 'exclude'), '')
def get_named_file(self, path):
"""Get a file from the control dir with a specific name.
Although the filename should be interpreted as a filename relative to
the control dir in a disk-based Repo, the object returned need not be
pointing to a file in that location.
:param path: The path to the file, relative to the control dir.
:return: An open file object, or None if the file does not exist.
"""
raise NotImplementedError(self.get_named_file)
def _put_named_file(self, path, contents):
"""Write a file to the control dir with the given name and contents.
:param path: The path to the file, relative to the control dir.
:param contents: A string to write to the file.
"""
raise NotImplementedError(self._put_named_file)
def open_index(self):
"""Open the index for this repository.
:raises NoIndexPresent: If no index is present
:return: Index instance
"""
raise NotImplementedError(self.open_index)
def fetch(self, target, determine_wants=None, progress=None):
"""Fetch objects into another repository.
:param target: The target repository
:param determine_wants: Optional function to determine what refs to
fetch.
:param progress: Optional progress function
"""
if determine_wants is None:
determine_wants = lambda heads: heads.values()
target.object_store.add_objects(
self.fetch_objects(determine_wants, target.get_graph_walker(),
progress))
return self.get_refs()
def fetch_objects(self, determine_wants, graph_walker, progress,
get_tagged=None):
"""Fetch the missing objects required for a set of revisions.
:param determine_wants: Function that takes a dictionary with heads
and returns the list of heads to fetch.
:param graph_walker: Object that can iterate over the list of revisions
to fetch and has an "ack" method that will be called to acknowledge
that a revision is present.
:param progress: Simple progress function that will be called with
updated progress strings.
:param get_tagged: Function that returns a dict of pointed-to sha -> tag
sha for including tags.
:return: iterator over objects, with __len__ implemented
"""
wants = determine_wants(self.get_refs())
if wants is None:
# TODO(dborowitz): find a way to short-circuit that doesn't change
# this interface.
return None
haves = self.object_store.find_common_revisions(graph_walker)
return self.object_store.iter_shas(
self.object_store.find_missing_objects(haves, wants, progress,
get_tagged))
def get_graph_walker(self, heads=None):
if heads is None:
heads = self.refs.as_dict('refs/heads').values()
return self.object_store.get_graph_walker(heads)
def ref(self, name):
"""Return the SHA1 a ref is pointing to."""
return self.refs[name]
def get_refs(self):
"""Get dictionary with all refs."""
return self.refs.as_dict()
def head(self):
"""Return the SHA1 pointed at by HEAD."""
return self.refs['HEAD']
def _get_object(self, sha, cls):
assert len(sha) in (20, 40)
ret = self.get_object(sha)
if not isinstance(ret, cls):
if cls is Commit:
raise NotCommitError(ret)
elif cls is Blob:
raise NotBlobError(ret)
elif cls is Tree:
raise NotTreeError(ret)
elif cls is Tag:
raise NotTagError(ret)
else:
raise Exception("Type invalid: %r != %r" % (
ret.type_name, cls.type_name))
return ret
def get_object(self, sha):
return self.object_store[sha]
def get_parents(self, sha):
return self.commit(sha).parents
def get_config(self):
import ConfigParser
p = ConfigParser.RawConfigParser()
p.read(os.path.join(self._controldir, 'config'))
return dict((section, dict(p.items(section)))
for section in p.sections())
def commit(self, sha):
"""Retrieve the commit with a particular SHA.
:param sha: SHA of the commit to retrieve
:raise NotCommitError: If the SHA provided doesn't point at a Commit
:raise KeyError: If the SHA provided didn't exist
:return: A `Commit` object
"""
warnings.warn("Repo.commit(sha) is deprecated. Use Repo[sha] instead.",
category=DeprecationWarning, stacklevel=2)
return self._get_object(sha, Commit)
def tree(self, sha):
"""Retrieve the tree with a particular SHA.
:param sha: SHA of the tree to retrieve
:raise NotTreeError: If the SHA provided doesn't point at a Tree
:raise KeyError: If the SHA provided didn't exist
:return: A `Tree` object
"""
warnings.warn("Repo.tree(sha) is deprecated. Use Repo[sha] instead.",
category=DeprecationWarning, stacklevel=2)
return self._get_object(sha, Tree)
def tag(self, sha):
"""Retrieve the tag with a particular SHA.
:param sha: SHA of the tag to retrieve
:raise NotTagError: If the SHA provided doesn't point at a Tag
:raise KeyError: If the SHA provided didn't exist
:return: A `Tag` object
"""
warnings.warn("Repo.tag(sha) is deprecated. Use Repo[sha] instead.",
category=DeprecationWarning, stacklevel=2)
return self._get_object(sha, Tag)
def get_blob(self, sha):
"""Retrieve the blob with a particular SHA.
:param sha: SHA of the blob to retrieve
:raise NotBlobError: If the SHA provided doesn't point at a Blob
:raise KeyError: If the SHA provided didn't exist
:return: A `Blob` object
"""
warnings.warn("Repo.get_blob(sha) is deprecated. Use Repo[sha] "
"instead.", category=DeprecationWarning, stacklevel=2)
return self._get_object(sha, Blob)
def get_peeled(self, ref):
"""Get the peeled value of a ref.
:param ref: The refname to peel.
:return: The fully-peeled SHA1 of a tag object, after peeling all
intermediate tags; if the original ref does not point to a tag, this
will equal the original SHA1.
"""
cached = self.refs.get_peeled(ref)
if cached is not None:
return cached
return self.object_store.peel_sha(self.refs[ref]).id
def revision_history(self, head):
"""Returns a list of the commits reachable from head.
        Returns a list of commit objects, the first of which will be the commit
        of head, then following that will be the parents.
        Raises NotCommitError if any non-commit objects are referenced,
        including if the head parameter isn't the SHA of a commit.
XXX: work out how to handle merges.
"""
# We build the list backwards, as parents are more likely to be older
# than children
pending_commits = [head]
history = []
while pending_commits != []:
head = pending_commits.pop(0)
try:
commit = self[head]
except KeyError:
raise MissingCommitError(head)
if type(commit) != Commit:
raise NotCommitError(commit)
if commit in history:
continue
i = 0
for known_commit in history:
if known_commit.commit_time > commit.commit_time:
break
i += 1
history.insert(i, commit)
pending_commits += commit.parents
history.reverse()
return history
def __getitem__(self, name):
if len(name) in (20, 40):
try:
return self.object_store[name]
except KeyError:
pass
try:
return self.object_store[self.refs[name]]
except RefFormatError:
raise KeyError(name)
def __contains__(self, name):
if len(name) in (20, 40):
return name in self.object_store or name in self.refs
else:
return name in self.refs
def __setitem__(self, name, value):
if name.startswith("refs/") or name == "HEAD":
if isinstance(value, ShaFile):
self.refs[name] = value.id
elif isinstance(value, str):
self.refs[name] = value
else:
raise TypeError(value)
else:
raise ValueError(name)
def __delitem__(self, name):
if name.startswith("refs") or name == "HEAD":
del self.refs[name]
raise ValueError(name)
def do_commit(self, message, committer=None,
author=None, commit_timestamp=None,
commit_timezone=None, author_timestamp=None,
author_timezone=None, tree=None, encoding=None):
"""Create a new commit.
:param message: Commit message
:param committer: Committer fullname
:param author: Author fullname (defaults to committer)
:param commit_timestamp: Commit timestamp (defaults to now)
:param commit_timezone: Commit timestamp timezone (defaults to GMT)
:param author_timestamp: Author timestamp (defaults to commit timestamp)
:param author_timezone: Author timestamp timezone
(defaults to commit timestamp timezone)
:param tree: SHA1 of the tree root to use (if not specified the
current index will be committed).
:param encoding: Encoding
:return: New commit SHA1
"""
import time
c = Commit()
if tree is None:
index = self.open_index()
c.tree = index.commit(self.object_store)
else:
if len(tree) != 40:
raise ValueError("tree must be a 40-byte hex sha string")
c.tree = tree
# TODO: Allow username to be missing, and get it from .git/config
if committer is None:
raise ValueError("committer not set")
c.committer = committer
if commit_timestamp is None:
commit_timestamp = time.time()
c.commit_time = int(commit_timestamp)
if commit_timezone is None:
# FIXME: Use current user timezone rather than UTC
commit_timezone = 0
c.commit_timezone = commit_timezone
if author is None:
author = committer
c.author = author
if author_timestamp is None:
author_timestamp = commit_timestamp
c.author_time = int(author_timestamp)
if author_timezone is None:
author_timezone = commit_timezone
c.author_timezone = author_timezone
if encoding is not None:
c.encoding = encoding
c.message = message
try:
old_head = self.refs["HEAD"]
c.parents = [old_head]
self.object_store.add_object(c)
ok = self.refs.set_if_equals("HEAD", old_head, c.id)
except KeyError:
c.parents = []
self.object_store.add_object(c)
ok = self.refs.add_if_new("HEAD", c.id)
if not ok:
# Fail if the atomic compare-and-swap failed, leaving the commit and
# all its objects as garbage.
raise CommitError("HEAD changed during commit")
return c.id
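# Illustrative sketch (added for exposition, not part of the original module):
# the minimal do_commit() flow against a MemoryRepo (defined further down in
# this module). The committer string is a placeholder.
def _do_commit_example():
    """Commit an empty tree to an in-memory repository and return its SHA."""
    repo = MemoryRepo.init_bare([], {})
    tree = Tree()
    repo.object_store.add_object(tree)
    return repo.do_commit(
        'initial commit\n',
        committer='Example Committer <committer@example.com>',
        tree=tree.id)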
class Repo(BaseRepo):
"""A git repository backed by local disk."""
def __init__(self, root):
if os.path.isdir(os.path.join(root, ".git", OBJECTDIR)):
self.bare = False
self._controldir = os.path.join(root, ".git")
elif (os.path.isdir(os.path.join(root, OBJECTDIR)) and
os.path.isdir(os.path.join(root, REFSDIR))):
self.bare = True
self._controldir = root
else:
raise NotGitRepository(root)
self.path = root
object_store = DiskObjectStore(os.path.join(self.controldir(),
OBJECTDIR))
refs = DiskRefsContainer(self.controldir())
BaseRepo.__init__(self, object_store, refs)
def controldir(self):
"""Return the path of the control directory."""
return self._controldir
def _put_named_file(self, path, contents):
"""Write a file to the control dir with the given name and contents.
:param path: The path to the file, relative to the control dir.
:param contents: A string to write to the file.
"""
path = path.lstrip(os.path.sep)
f = GitFile(os.path.join(self.controldir(), path), 'wb')
try:
f.write(contents)
finally:
f.close()
def get_named_file(self, path):
"""Get a file from the control dir with a specific name.
Although the filename should be interpreted as a filename relative to
the control dir in a disk-based Repo, the object returned need not be
pointing to a file in that location.
:param path: The path to the file, relative to the control dir.
:return: An open file object, or None if the file does not exist.
"""
# TODO(dborowitz): sanitize filenames, since this is used directly by
# the dumb web serving code.
path = path.lstrip(os.path.sep)
try:
return open(os.path.join(self.controldir(), path), 'rb')
except (IOError, OSError), e:
if e.errno == errno.ENOENT:
return None
raise
def index_path(self):
"""Return path to the index file."""
return os.path.join(self.controldir(), INDEX_FILENAME)
def open_index(self):
"""Open the index for this repository."""
        from lib.dulwich.index import Index
if not self.has_index():
raise NoIndexPresent()
return Index(self.index_path())
def has_index(self):
"""Check if an index is present."""
# Bare repos must never have index files; non-bare repos may have a
# missing index file, which is treated as empty.
return not self.bare
def stage(self, paths):
"""Stage a set of paths.
:param paths: List of paths, relative to the repository path
"""
        from lib.dulwich.index import cleanup_mode
index = self.open_index()
for path in paths:
full_path = os.path.join(self.path, path)
blob = Blob()
try:
st = os.stat(full_path)
except OSError:
# File no longer exists
try:
del index[path]
except KeyError:
pass # Doesn't exist in the index either
else:
f = open(full_path, 'rb')
try:
blob.data = f.read()
finally:
f.close()
self.object_store.add_object(blob)
# XXX: Cleanup some of the other file properties as well?
index[path] = (st.st_ctime, st.st_mtime, st.st_dev, st.st_ino,
cleanup_mode(st.st_mode), st.st_uid, st.st_gid, st.st_size,
blob.id, 0)
index.write()
def __repr__(self):
return "<Repo at %r>" % self.path
@classmethod
def _init_maybe_bare(cls, path, bare):
for d in BASE_DIRECTORIES:
os.mkdir(os.path.join(path, *d))
DiskObjectStore.init(os.path.join(path, OBJECTDIR))
ret = cls(path)
ret.refs.set_symbolic_ref("HEAD", "refs/heads/master")
ret._init_files(bare)
return ret
@classmethod
def init(cls, path, mkdir=False):
if mkdir:
os.mkdir(path)
controldir = os.path.join(path, ".git")
os.mkdir(controldir)
cls._init_maybe_bare(controldir, False)
return cls(path)
@classmethod
def init_bare(cls, path):
return cls._init_maybe_bare(path, True)
create = init_bare
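# Illustrative sketch (added for exposition, not part of the original module):
# creating a working repository and a bare repository on disk. The paths are
# placeholders and nothing below runs at import time.
def _repo_init_example():
    """Initialise one non-bare and one bare repository."""
    # Creates /tmp/example-repo and a .git control directory inside it.
    working = Repo.init('/tmp/example-repo', mkdir=True)
    # init_bare() expects the target directory to exist already.
    bare = Repo.init_bare('/tmp/example-repo.git')
    return working, bare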
class MemoryRepo(BaseRepo):
"""Repo that stores refs, objects, and named files in memory.
MemoryRepos are always bare: they have no working tree and no index, since
those have a stronger dependency on the filesystem.
"""
def __init__(self):
BaseRepo.__init__(self, MemoryObjectStore(), DictRefsContainer({}))
self._named_files = {}
self.bare = True
def _put_named_file(self, path, contents):
"""Write a file to the control dir with the given name and contents.
:param path: The path to the file, relative to the control dir.
:param contents: A string to write to the file.
"""
self._named_files[path] = contents
def get_named_file(self, path):
"""Get a file from the control dir with a specific name.
Although the filename should be interpreted as a filename relative to
        the control dir in a disk-based Repo, the object returned need not be
pointing to a file in that location.
:param path: The path to the file, relative to the control dir.
:return: An open file object, or None if the file does not exist.
"""
contents = self._named_files.get(path, None)
if contents is None:
return None
return StringIO(contents)
def open_index(self):
"""Fail to open index for this repo, since it is bare."""
raise NoIndexPresent()
@classmethod
def init_bare(cls, objects, refs):
ret = cls()
for obj in objects:
ret.object_store.add_object(obj)
for refname, sha in refs.iteritems():
ret.refs[refname] = sha
ret._init_files(bare=True)
return ret
|
robhudson/django | refs/heads/master | django/forms/boundfield.py | 135 | from __future__ import unicode_literals
import datetime
from django.forms.utils import flatatt, pretty_name
from django.forms.widgets import Textarea, TextInput
from django.utils import six
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
__all__ = ('BoundField',)
UNSET = object()
@html_safe
@python_2_unicode_compatible
class BoundField(object):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
self._initial_value = UNSET
def __str__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def __iter__(self):
"""
Yields rendered strings that comprise all widgets in this BoundField.
This really is only useful for RadioSelect widgets, so that you can
iterate over individual radio buttons in a template.
"""
id_ = self.field.widget.attrs.get('id') or self.auto_id
attrs = {'id': id_} if id_ else {}
for subwidget in self.field.widget.subwidgets(self.html_name, self.value(), attrs):
yield subwidget
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, six.integer_types):
raise TypeError
return list(self.__iter__())[idx]
@property
def errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
if self.field.localize:
widget.is_localized = True
attrs = attrs or {}
if self.field.disabled:
attrs['disabled'] = True
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return force_text(widget.render(name, self.value(), attrs=attrs))
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
if self._initial_value is not UNSET:
data = self._initial_value
else:
data = data()
# If this is an auto-generated default date, nix the
# microseconds for standardized handling. See #22502.
if (isinstance(data, (datetime.datetime, datetime.time)) and
not self.field.widget.supports_microseconds):
data = data.replace(microsecond=0)
self._initial_value = data
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None, label_suffix=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
contents should be 'mark_safe'd to avoid HTML escaping. If contents
aren't given, uses the field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
label_suffix allows overriding the form's label_suffix.
"""
contents = contents or self.label
if label_suffix is None:
label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
else self.form.label_suffix)
# Only add the suffix if the label does not end in punctuation.
# Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix from being appended to the label
if label_suffix and contents and contents[-1] not in _(':?.!'):
contents = format_html('{}{}', contents, label_suffix)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
id_for_label = widget.id_for_label(id_)
if id_for_label:
attrs = dict(attrs or {}, **{'for': id_for_label})
if self.field.required and hasattr(self.form, 'required_css_class'):
attrs = attrs or {}
if 'class' in attrs:
attrs['class'] += ' ' + self.form.required_css_class
else:
attrs['class'] = self.form.required_css_class
attrs = flatatt(attrs) if attrs else ''
contents = format_html('<label{}>{}</label>', attrs, contents)
else:
contents = conditional_escape(contents)
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
@property
def is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_text(auto_id):
return smart_text(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
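# Illustrative sketch (added for exposition, not part of Django itself): how a
# BoundField is usually obtained from a form and rendered. ExampleForm is a
# hypothetical form defined only for this sketch; rendering assumes Django
# settings are already configured.
def _bound_field_example():
    """Return the widget HTML, label tag and CSS classes for one bound field."""
    from django import forms
    class ExampleForm(forms.Form):
        name = forms.CharField(label='Your name')
    form = ExampleForm(data={'name': 'Ada'})
    bound = form['name']  # Form.__getitem__ returns a BoundField
    return bound.as_widget(), bound.label_tag(), bound.css_classes()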
|
c11/earthengine-api | refs/heads/master | python/examples/FeatureCollection/buffer.py | 5 | #!/usr/bin/env python
"""Buffer Example.
Display the area within 2 kilometers of any San Francisco BART station.
"""
import ee
import ee.mapclient
ee.Initialize()
ee.mapclient.centerMap(-122.4, 37.7, 11)
bart_stations = ee.FeatureCollection(
'ft:1xCCZkVn8DIkB7i7RVkvsYWxAxsdsQZ6SbD9PCXw')
buffered = bart_stations.map(lambda f: f.buffer(2000))
unioned = buffered.union()
ee.mapclient.addToMap(unioned, {'color': '800080'})
|
dimtruck/magnum | refs/heads/master | magnum/common/pythonk8sclient/swagger_client/models/v1_container_port.py | 5 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class V1ContainerPort(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Swagger model
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'host_port': 'int',
'container_port': 'int',
'protocol': 'str',
'host_ip': 'str'
}
self.attribute_map = {
'name': 'name',
'host_port': 'hostPort',
'container_port': 'containerPort',
'protocol': 'protocol',
'host_ip': 'hostIP'
}
self._name = None
self._host_port = None
self._container_port = None
self._protocol = None
self._host_ip = None
@property
def name(self):
"""
Gets the name of this V1ContainerPort.
name for the port that can be referred to by services; must be an IANA_SVC_NAME and unique within the pod
:return: The name of this V1ContainerPort.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1ContainerPort.
name for the port that can be referred to by services; must be an IANA_SVC_NAME and unique within the pod
:param name: The name of this V1ContainerPort.
:type: str
"""
self._name = name
@property
def host_port(self):
"""
Gets the host_port of this V1ContainerPort.
number of port to expose on the host; most containers do not need this
:return: The host_port of this V1ContainerPort.
:rtype: int
"""
return self._host_port
@host_port.setter
def host_port(self, host_port):
"""
Sets the host_port of this V1ContainerPort.
number of port to expose on the host; most containers do not need this
:param host_port: The host_port of this V1ContainerPort.
:type: int
"""
self._host_port = host_port
@property
def container_port(self):
"""
Gets the container_port of this V1ContainerPort.
number of port to expose on the pod's IP address
:return: The container_port of this V1ContainerPort.
:rtype: int
"""
return self._container_port
@container_port.setter
def container_port(self, container_port):
"""
Sets the container_port of this V1ContainerPort.
number of port to expose on the pod's IP address
:param container_port: The container_port of this V1ContainerPort.
:type: int
"""
self._container_port = container_port
@property
def protocol(self):
"""
Gets the protocol of this V1ContainerPort.
protocol for port; must be UDP or TCP; TCP if unspecified
:return: The protocol of this V1ContainerPort.
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""
Sets the protocol of this V1ContainerPort.
protocol for port; must be UDP or TCP; TCP if unspecified
:param protocol: The protocol of this V1ContainerPort.
:type: str
"""
self._protocol = protocol
@property
def host_ip(self):
"""
Gets the host_ip of this V1ContainerPort.
host IP to bind the port to
:return: The host_ip of this V1ContainerPort.
:rtype: str
"""
return self._host_ip
@host_ip.setter
def host_ip(self, host_ip):
"""
Sets the host_ip of this V1ContainerPort.
host IP to bind the port to
:param host_ip: The host_ip of this V1ContainerPort.
:type: str
"""
self._host_ip = host_ip
def to_dict(self):
"""
Return model properties dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Return model properties str
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
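# Illustrative sketch (added for exposition, not emitted by the swagger code
# generator): building a V1ContainerPort and serialising it. The port values
# are arbitrary placeholders.
def _v1_container_port_example():
    """Return the dict representation of a sample container port."""
    port = V1ContainerPort()
    port.name = 'http'
    port.container_port = 8080
    port.protocol = 'TCP'
    # Unset attributes (host_port, host_ip) stay None in the resulting dict.
    return port.to_dict()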
|
sephalon/python-ivi | refs/heads/master | ivi/testequity/testequity140.py | 6 | """
Python Interchangeable Virtual Instrument Library
Driver for Test Equity Model 140
Copyright (c) 2014 Jeff Wurzbach
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import ics
from .testequityf4 import *
class testequity140(testequityf4, ics.ics8099):
"TestEquity Model 140 Thermal Chamber"
def __init__(self, *args, **kwargs):
#self.__dict__.setdefault('_instrument_id', '8099')
super(testequity140, self).__init__(*args, **kwargs)
self._identity_description = "TestEquity Model 140 Thermal Chamber"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = "TestEquity"
self._identity_instrument_manufacturer = "TestEquity"
self._identity_instrument_model = "140"
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 0
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['140']
"""
self._add_method('read_register',
self._read_register,
"Read Modbus register")
self._add_method('write_register',
self._write_register,
"Write Modbus register")
"""
|
paulocastro31/android_kernel_motorola_msm8226 | refs/heads/master | scripts/gcc-wrapper.py | 182 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
"workqueue.c:480"
])
# Capture the name of the object file, so we can remove it if a forbidden
# warning appears.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
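# Illustrative sketch (added for exposition, not part of the wrapper): the kind
# of gcc diagnostic line that warning_re is written to match. The file name and
# message are made up.
def _warning_re_example():
    """Return the 'file:line' key extracted from a sample gcc warning."""
    line = 'kernel/sched/core.c:123:45: warning: unused variable foo'
    m = warning_re.match(line)
    return m.group(2) if m else None   # -> 'core.c:123'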
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
hanicker/odoo | refs/heads/8.0 | addons/hr_payroll/wizard/__init__.py | 442 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll_payslips_by_employees
import hr_payroll_contribution_register_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mKeRix/home-assistant | refs/heads/dev | homeassistant/helpers/signal.py | 24 | """Signal handling related helpers."""
import logging
import signal
import sys
from types import FrameType
from homeassistant.const import RESTART_EXIT_CODE
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
@callback
@bind_hass
def async_register_signal_handling(hass: HomeAssistant) -> None:
"""Register system signal handler for core."""
if sys.platform != "win32":
@callback
def async_signal_handle(exit_code: int) -> None:
"""Wrap signal handling.
* queue call to shutdown task
* re-instate default handler
"""
hass.loop.remove_signal_handler(signal.SIGTERM)
hass.loop.remove_signal_handler(signal.SIGINT)
hass.async_create_task(hass.async_stop(exit_code))
try:
hass.loop.add_signal_handler(signal.SIGTERM, async_signal_handle, 0)
except ValueError:
_LOGGER.warning("Could not bind to SIGTERM")
try:
hass.loop.add_signal_handler(signal.SIGINT, async_signal_handle, 0)
except ValueError:
_LOGGER.warning("Could not bind to SIGINT")
try:
hass.loop.add_signal_handler(
signal.SIGHUP, async_signal_handle, RESTART_EXIT_CODE
)
except ValueError:
_LOGGER.warning("Could not bind to SIGHUP")
else:
old_sigterm = None
old_sigint = None
@callback
def async_signal_handle(exit_code: int, frame: FrameType) -> None:
"""Wrap signal handling.
* queue call to shutdown task
* re-instate default handler
"""
signal.signal(signal.SIGTERM, old_sigterm)
signal.signal(signal.SIGINT, old_sigint)
hass.async_create_task(hass.async_stop(exit_code))
try:
old_sigterm = signal.signal(signal.SIGTERM, async_signal_handle)
except ValueError:
_LOGGER.warning("Could not bind to SIGTERM")
try:
old_sigint = signal.signal(signal.SIGINT, async_signal_handle)
except ValueError:
_LOGGER.warning("Could not bind to SIGINT")
|
nhomar/odoo-mirror | refs/heads/8.0 | addons/product/tests/test_uom.py | 127 | from openerp.tests.common import TransactionCase
class TestUom(TransactionCase):
"""Tests for unit of measure conversion"""
def setUp(self):
super(TestUom, self).setUp()
self.product = self.registry('product.product')
self.uom = self.registry('product.uom')
self.imd = self.registry('ir.model.data')
def test_10_conversion(self):
cr, uid = self.cr, self.uid
gram_id = self.imd.get_object_reference(cr, uid, 'product', 'product_uom_gram')[1]
tonne_id = self.imd.get_object_reference(cr, uid, 'product', 'product_uom_ton')[1]
qty = self.uom._compute_qty(cr, uid, gram_id, 1020000, tonne_id)
self.assertEquals(qty, 1.02, "Converted quantity does not correspond.")
price = self.uom._compute_price(cr, uid, gram_id, 2, tonne_id)
self.assertEquals(price, 2000000.0, "Converted price does not correspond.")
def test_20_rounding(self):
cr, uid = self.cr, self.uid
unit_id = self.imd.get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
categ_unit_id = self.imd.get_object_reference(cr, uid, 'product', 'product_uom_categ_unit')[1]
score_id = self.uom.create(cr, uid, {
'name': 'Score',
'factor_inv': 20,
'uom_type': 'bigger',
'rounding': 1.0,
'category_id': categ_unit_id
})
qty = self.uom._compute_qty(cr, uid, unit_id, 2, score_id)
self.assertEquals(qty, 1, "Converted quantity should be rounded up.")
|
wfxiang08/sqlalchemy | refs/heads/feature/wftest | test/dialect/test_sybase.py | 28 | from sqlalchemy import *
from sqlalchemy import sql
from sqlalchemy.databases import sybase
from sqlalchemy.testing import *
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = sybase.dialect()
def test_extract(self):
t = sql.table('t', sql.column('col1'))
mapping = {
'day': 'day',
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond',
'millisecond': 'millisecond',
'year': 'year',
}
for field, subst in list(mapping.items()):
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % subst)
|
huanpc/IoT-1 | refs/heads/master | gui/controller/.venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/util/request.py | 780 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
|
ambitioninc/ambition-python | refs/heads/master | ambition/tests/api_client_tests.py | 1 | import os
from functools import partial
import datetime
import unittest
from mock import patch
from ..api_client import ApiClient
from ..configuration import ApiConfiguration
from .. import models
test_dict = {
'name': 'Test Name',
'display_name': 'Test Display Name',
'data_format': 'Test Format',
}
class TestModel(object):
def __init__(self):
self.swagger_types = {
'display_name': 'str',
'name': 'str',
'data_format': 'str',
}
self.attribute_map = {
'display_name': 'display_name',
'name': 'name',
'data_format': 'data_format',
}
self.display_name = None
self.name = None
self.data_format = None
self.some_other_attribute = None
class ApiClientTest(unittest.TestCase):
def setUp(self):
host = 'http://example.com'
api_key = 'keyboardcat'
configuration = ApiConfiguration(host, api_key)
self.client = ApiClient(configuration=configuration)
self.base_expected_headers = {
'Authorization': 'Token keyboardcat',
'User-Agent': 'Python-Swagger',
}
def test_sanitization_for_serialization(self):
"""
Verify that data are normalized
"""
model = TestModel()
for key in test_dict.keys():
setattr(model, key, test_dict[key])
sanitized_model = self.client.sanitize_for_serialization(model)
self.assertEqual(sanitized_model, test_dict)
def test_deserialization(self):
obj = [{'foo': 'bar'}, {'baz': 'qux'}]
deserialized = self.client.deserialize(obj, 'list[dict]')
self.assertEqual(obj, deserialized)
obj = 1
deserialized = self.client.deserialize(obj, 'dict')
self.assertEqual(deserialized, obj)
# deserialize model from dict that doesn't have all model attributes
models.TestModel = TestModel
obj = {'name': 'some name'}
deserialized = self.client.deserialize(obj, 'TestModel')
self.assertIsNone(deserialized.display_name)
self.assertIsNone(deserialized.data_format)
# deserialize datetimes
now = datetime.datetime.now()
deserialized = self.client.deserialize(now.isoformat(), 'datetime')
self.assertEqual(now, deserialized)
@patch('ambition.api_client.RESTClient.GET')
@patch('ambition.api_client.RESTClient.HEAD')
@patch('ambition.api_client.RESTClient.POST')
@patch('ambition.api_client.RESTClient.PATCH')
@patch('ambition.api_client.RESTClient.PUT')
@patch('ambition.api_client.RESTClient.DELETE')
def test_request_method(self, delete, put, patch, post, head, get):
"""
Verify that the correct client method is called with the right kwargs
"""
query_params = {'query': 'query_param'}
post_params = {'post': 'post_param'}
body = 'body'
self.client.request(
'GET', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'HEAD', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'POST', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'PATCH', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'PUT', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
self.client.request(
'DELETE', 'some_url', query_params=query_params, body=body,
post_params=post_params, headers=self.base_expected_headers)
delete.assert_called_with(
'some_url', query_params=query_params,
headers=self.base_expected_headers)
put.assert_called_with(
'some_url', post_params=post_params, body=body,
headers=self.base_expected_headers)
patch.assert_called_with(
'some_url', post_params=post_params, body=body,
headers=self.base_expected_headers)
post.assert_called_with(
'some_url', post_params=post_params, body=body,
headers=self.base_expected_headers)
head.assert_called_with(
'some_url', query_params=query_params,
headers=self.base_expected_headers)
get.assert_called_with(
'some_url', query_params=query_params,
headers=self.base_expected_headers)
n = ['NOT_A_METHOD', 'some_url']
self.assertRaises(ValueError, partial(self.client.request, *n))
def test_files(self):
"""
Verifies that the files are included in post params
"""
file_path = os.path.abspath(__file__)
files = {
'this_file': file_path
}
post_params = self.client.prepare_post_parameters(files=files)
self.assertIn('this_file', post_params)
def test_select_accepts(self):
"""
Verifies that the accept header is correctly selected (or not)
from a list
"""
self.assertIsNone(self.client.select_header_accept([]))
accepts = ['application/vnd.ms-excel', 'application/json']
self.assertEqual('application/json', self.client.select_header_accept(accepts))
accepts = ['application/vnd.ms-excel', 'text/csv']
self.assertEqual(', '.join(accepts), self.client.select_header_accept(accepts))
def test_select_content_type(self):
"""
Verifies that the content type header is correctly selected
"""
self.assertEqual('application/json', self.client.select_header_content_type([]))
content_types = ['application/vnd.ms-excel', 'application/json']
self.assertEqual('application/json', self.client.select_header_content_type(content_types))
content_types = ['application/vnd.ms-excel', 'text/csv']
self.assertEqual('application/vnd.ms-excel', self.client.select_header_content_type(content_types))
@patch('ambition.api_client.models')
@patch('ambition.api_client.RESTClient.GET')
def test_deserialization_single_model(self, rest_get, models):
"""
Verify that api responses are cast as the right model type
"""
rest_get.return_value = test_dict
models.TestModel = TestModel
model = self.client.call_api('/fake', 'GET', response='TestModel')
self.assertIsInstance(model, TestModel)
self.assertEqual(model.display_name, test_dict.get('display_name'))
self.assertEqual(model.name, test_dict.get('name'))
self.assertEqual(model.data_format, test_dict.get('data_format'))
@patch('ambition.api_client.models')
@patch('ambition.api_client.RESTClient.GET')
def test_deserialization_multiple_models(self, rest_get, models):
"""
Verify that list api responses are model iterators
"""
serialized_response = [test_dict, test_dict]
rest_get.return_value = serialized_response
models.TestModel = TestModel
response = self.client.call_api('/fake', 'GET', response='TestModel')
self.assertEqual(len(list(response)), 2)
for model in response:
self.assertIsInstance(model, TestModel)
@patch('ambition.api_client.ApiClient.request')
def test_path_params(self, request_mock):
"""
Verify that path parameters are constructed properly
"""
path_params = {
'foo': 'f',
'bar': 'b',
}
self.client.call_api('/{foo}/{bar}/', 'GET', path_params=path_params)
expected_url = 'http://example.com/f/b/'
request_mock.assert_called_with(
'GET', expected_url, body=None,
headers=self.base_expected_headers,
post_params=None, query_params=None)
@patch('ambition.api_client.ApiClient.request')
def test_query_params(self, request_mock):
"""
Verify that query parameters are normalized
"""
today = datetime.datetime.now().date()
query_params = {
'today': today,
'users': ['Marty McFly', 'H. G. Wells'],
'none_thing': None,
}
self.client.call_api('/stuff/', 'GET', query_params=query_params)
expected_query_params = {
'today': datetime.datetime.now().date().isoformat(),
'users': 'Marty McFly,H. G. Wells',
'none_thing': 'None',
}
request_mock.assert_called_with(
'GET', 'http://example.com/stuff/', body=None,
headers=self.base_expected_headers,
post_params=None, query_params=expected_query_params)
@patch('ambition.api_client.ApiClient.request')
def test_post_params(self, request_mock):
"""
Verify that post parameters are normalized
"""
today = datetime.datetime.now().date()
post_params = {
'today': today,
}
self.client.call_api('/stuff/', 'POST', post_params=post_params)
expected_post_params = {
'today': datetime.datetime.now().date().isoformat()
}
request_mock.assert_called_with(
'POST', 'http://example.com/stuff/', body=None,
headers=self.base_expected_headers,
post_params=expected_post_params, query_params=None)
@patch('ambition.api_client.ApiClient.request')
def test_body_normalization(self, request_mock):
"""
Verify that body is normalized
"""
today = datetime.datetime.now().date()
body = today
self.client.call_api('/stuff/', 'POST', body=body)
request_mock.assert_called_with(
'POST', 'http://example.com/stuff/', body=today.isoformat(),
headers=self.base_expected_headers,
post_params=None, query_params=None)
def test_update_params_for_auth(self):
"""
        Verify that headers and query params are left unchanged when auth_settings is None
"""
auth_settings = None
headers = {}
query_params = {}
self.client.update_params_for_auth(headers, query_params, auth_settings)
# confirm that neither dict was modified
self.assertEqual({}, headers)
self.assertEqual({}, query_params)
def test_user_agent(self):
"""
Verifies that clients are being constructed with user agent
"""
self.assertEqual('Python-Swagger', self.client.user_agent)
def test_deserialize_model_gracefully_handles_bad_input(self):
"""
Verifies that we won't try to enumerate an object not of list/dict type
when trying to cast it to a model type
"""
from ambition.models import PublicApiDataTypeRetrieveResponse
model = self.client.deserialize_model(PublicApiDataTypeRetrieveResponse, None)
self.assertIsInstance(model, PublicApiDataTypeRetrieveResponse)
for attribute in model.attribute_map:
self.assertIsNone(getattr(model, attribute))
def test_deserialize_datetimes(self):
"""
Verifies that datetimes are deserialized
"""
now = datetime.datetime.now()
now_deserialized = self.client.deserialize(now.isoformat(), 'datetime')
self.assertEqual(now, now_deserialized)
|
balloob/home-assistant | refs/heads/dev | homeassistant/components/onvif/config_flow.py | 8 | """Config flow for ONVIF."""
from pprint import pformat
from typing import List
from urllib.parse import urlparse
from onvif.exceptions import ONVIFError
import voluptuous as vol
from wsdiscovery.discovery import ThreadedWSDiscovery as WSDiscovery
from wsdiscovery.scope import Scope
from wsdiscovery.service import Service
from zeep.exceptions import Fault
from homeassistant import config_entries
from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.core import callback
# pylint: disable=unused-import
from .const import (
CONF_DEVICE_ID,
CONF_RTSP_TRANSPORT,
DEFAULT_ARGUMENTS,
DEFAULT_PORT,
DOMAIN,
LOGGER,
RTSP_TRANS_PROTOCOLS,
)
from .device import get_device
CONF_MANUAL_INPUT = "Manually configure ONVIF device"
def wsdiscovery() -> List[Service]:
"""Get ONVIF Profile S devices from network."""
discovery = WSDiscovery(ttl=4)
discovery.start()
services = discovery.searchServices(
scopes=[Scope("onvif://www.onvif.org/Profile/Streaming")]
)
discovery.stop()
return services
async def async_discovery(hass) -> bool:
"""Return if there are devices that can be discovered."""
LOGGER.debug("Starting ONVIF discovery...")
services = await hass.async_add_executor_job(wsdiscovery)
devices = []
for service in services:
url = urlparse(service.getXAddrs()[0])
device = {
CONF_DEVICE_ID: None,
CONF_NAME: service.getEPR(),
CONF_HOST: url.hostname,
CONF_PORT: url.port or 80,
}
for scope in service.getScopes():
scope_str = scope.getValue()
if scope_str.lower().startswith("onvif://www.onvif.org/name"):
device[CONF_NAME] = scope_str.split("/")[-1]
if scope_str.lower().startswith("onvif://www.onvif.org/mac"):
device[CONF_DEVICE_ID] = scope_str.split("/")[-1]
devices.append(device)
return devices
class OnvifFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a ONVIF config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OnvifOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the ONVIF config flow."""
self.device_id = None
self.devices = []
self.onvif_config = {}
async def async_step_user(self, user_input=None):
"""Handle user flow."""
if user_input is not None:
return await self.async_step_device()
return self.async_show_form(step_id="user")
async def async_step_device(self, user_input=None):
"""Handle WS-Discovery.
Let user choose between discovered devices and manual configuration.
        If no device is found, allow the user to manually input the configuration.
"""
if user_input:
if CONF_MANUAL_INPUT == user_input[CONF_HOST]:
return await self.async_step_manual_input()
for device in self.devices:
name = f"{device[CONF_NAME]} ({device[CONF_HOST]})"
if name == user_input[CONF_HOST]:
self.device_id = device[CONF_DEVICE_ID]
self.onvif_config = {
CONF_NAME: device[CONF_NAME],
CONF_HOST: device[CONF_HOST],
CONF_PORT: device[CONF_PORT],
}
return await self.async_step_auth()
discovery = await async_discovery(self.hass)
for device in discovery:
configured = any(
entry.unique_id == device[CONF_DEVICE_ID]
for entry in self._async_current_entries()
)
if not configured:
self.devices.append(device)
LOGGER.debug("Discovered ONVIF devices %s", pformat(self.devices))
if self.devices:
names = [
f"{device[CONF_NAME]} ({device[CONF_HOST]})" for device in self.devices
]
names.append(CONF_MANUAL_INPUT)
return self.async_show_form(
step_id="device",
data_schema=vol.Schema({vol.Optional(CONF_HOST): vol.In(names)}),
)
return await self.async_step_manual_input()
async def async_step_manual_input(self, user_input=None):
"""Manual configuration."""
if user_input:
self.onvif_config = user_input
return await self.async_step_auth()
return self.async_show_form(
step_id="manual_input",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): str,
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
),
)
async def async_step_auth(self, user_input=None):
"""Username and Password configuration for ONVIF device."""
if user_input:
self.onvif_config[CONF_USERNAME] = user_input[CONF_USERNAME]
self.onvif_config[CONF_PASSWORD] = user_input[CONF_PASSWORD]
return await self.async_step_profiles()
# Username and Password are optional and default empty
# due to some cameras not allowing you to change ONVIF user settings.
# See https://github.com/home-assistant/core/issues/39182
# and https://github.com/home-assistant/core/issues/35904
return self.async_show_form(
step_id="auth",
data_schema=vol.Schema(
{
vol.Optional(CONF_USERNAME, default=""): str,
vol.Optional(CONF_PASSWORD, default=""): str,
}
),
)
async def async_step_profiles(self, user_input=None):
"""Fetch ONVIF device profiles."""
errors = {}
LOGGER.debug(
"Fetching profiles from ONVIF device %s", pformat(self.onvif_config)
)
device = get_device(
self.hass,
self.onvif_config[CONF_HOST],
self.onvif_config[CONF_PORT],
self.onvif_config[CONF_USERNAME],
self.onvif_config[CONF_PASSWORD],
)
try:
await device.update_xaddrs()
device_mgmt = device.create_devicemgmt_service()
# Get the MAC address to use as the unique ID for the config flow
if not self.device_id:
try:
network_interfaces = await device_mgmt.GetNetworkInterfaces()
for interface in network_interfaces:
if interface.Enabled:
self.device_id = interface.Info.HwAddress
except Fault as fault:
if "not implemented" not in fault.message:
raise fault
LOGGER.debug(
"Couldn't get network interfaces from ONVIF deivice '%s'. Error: %s",
self.onvif_config[CONF_NAME],
fault,
)
# If no network interfaces are exposed, fallback to serial number
if not self.device_id:
device_info = await device_mgmt.GetDeviceInformation()
self.device_id = device_info.SerialNumber
if not self.device_id:
return self.async_abort(reason="no_mac")
await self.async_set_unique_id(self.device_id, raise_on_progress=False)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: self.onvif_config[CONF_HOST],
CONF_PORT: self.onvif_config[CONF_PORT],
CONF_NAME: self.onvif_config[CONF_NAME],
}
)
# Verify there is an H264 profile
media_service = device.create_media_service()
profiles = await media_service.GetProfiles()
h264 = any(
profile.VideoEncoderConfiguration
and profile.VideoEncoderConfiguration.Encoding == "H264"
for profile in profiles
)
if not h264:
return self.async_abort(reason="no_h264")
await device.close()
title = f"{self.onvif_config[CONF_NAME]} - {self.device_id}"
return self.async_create_entry(title=title, data=self.onvif_config)
except ONVIFError as err:
LOGGER.error(
"Couldn't setup ONVIF device '%s'. Error: %s",
self.onvif_config[CONF_NAME],
err,
)
await device.close()
return self.async_abort(reason="onvif_error")
except Fault:
errors["base"] = "cannot_connect"
await device.close()
return self.async_show_form(step_id="auth", errors=errors)
async def async_step_import(self, user_input):
"""Handle import."""
self.onvif_config = user_input
return await self.async_step_profiles()
class OnvifOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle ONVIF options."""
def __init__(self, config_entry):
"""Initialize ONVIF options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
async def async_step_init(self, user_input=None):
"""Manage the ONVIF options."""
return await self.async_step_onvif_devices()
async def async_step_onvif_devices(self, user_input=None):
"""Manage the ONVIF devices options."""
if user_input is not None:
self.options[CONF_EXTRA_ARGUMENTS] = user_input[CONF_EXTRA_ARGUMENTS]
self.options[CONF_RTSP_TRANSPORT] = user_input[CONF_RTSP_TRANSPORT]
return self.async_create_entry(title="", data=self.options)
return self.async_show_form(
step_id="onvif_devices",
data_schema=vol.Schema(
{
vol.Optional(
CONF_EXTRA_ARGUMENTS,
default=self.config_entry.options.get(
CONF_EXTRA_ARGUMENTS, DEFAULT_ARGUMENTS
),
): str,
vol.Optional(
CONF_RTSP_TRANSPORT,
default=self.config_entry.options.get(
CONF_RTSP_TRANSPORT, RTSP_TRANS_PROTOCOLS[0]
),
): vol.In(RTSP_TRANS_PROTOCOLS),
}
),
)
|
ghjm/ansible | refs/heads/devel | lib/ansible/module_utils/facts/virtual/openbsd.py | 33 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
from ansible.module_utils.facts.utils import get_file_content
class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
    This is an OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def get_virtual_facts(self):
virtual_facts = {}
host_tech = set()
guest_tech = set()
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
virtual_product_facts = self.detect_virt_product('hw.product')
guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
host_tech.update(virtual_product_facts['virtualization_tech_host'])
virtual_facts.update(virtual_product_facts)
virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
if virtual_facts['virtualization_type'] == '':
virtual_facts.update(virtual_vendor_facts)
# Check the dmesg if vmm(4) attached, indicating the host is
# capable of virtualization.
dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT)
for line in dmesg_boot.splitlines():
match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
if match:
host_tech.add('vmm')
virtual_facts['virtualization_type'] = 'vmm'
virtual_facts['virtualization_role'] = 'host'
virtual_facts['virtualization_tech_guest'] = guest_tech
virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
class OpenBSDVirtualCollector(VirtualCollector):
_fact_class = OpenBSDVirtual
_platform = 'OpenBSD'
|
serge-sans-paille/pythran | refs/heads/master | pythran/tests/openmp.legacy/omp_taskyield.py | 1 | def omp_taskyield():
import omp
from time import sleep
NUM_TASKS = 25
count = 0
start_id = [0 for _ in range(NUM_TASKS)]
current_id = [0 for _ in range(NUM_TASKS)]
if 'omp parallel':
use_omp = omp.in_parallel()
if 'omp single':
for i in range(NUM_TASKS):
myi = i
if 'omp task firstprivate(myi) untied':
sleep(0.01)
start_id[myi] = omp.get_thread_num()
'omp taskyield'
if start_id[myi] % 2 == 0:
sleep(0.01)
current_id[myi] = omp.get_thread_num()
for i in range(NUM_TASKS):
if current_id[i] == start_id[i]:
count += 1
return count < NUM_TASKS or not use_omp
|
UXE/local-edx | refs/heads/master | lms/djangoapps/branding/tests.py | 1 | """
Tests for branding page
"""
import datetime
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from django.test.utils import override_settings
from django.test.client import RequestFactory
from pytz import UTC
from mock import patch, Mock
from edxmako.shortcuts import render_to_response
from branding.views import index
from xmodule.modulestore.tests.django_utils import TEST_DATA_MOCK_MODULESTORE
from edxmako.tests import mako_middleware_process_request
import student.views
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
FEATURES_WITH_STARTDATE = settings.FEATURES.copy()
FEATURES_WITH_STARTDATE['DISABLE_START_DATES'] = False
FEATURES_WO_STARTDATE = settings.FEATURES.copy()
FEATURES_WO_STARTDATE['DISABLE_START_DATES'] = True
def mock_render_to_response(*args, **kwargs):
"""
Mock the render_to_response function
"""
return render_to_response(*args, **kwargs)
RENDER_MOCK = Mock(side_effect=mock_render_to_response)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class AnonymousIndexPageTest(ModuleStoreTestCase):
"""
    Tests that anonymous users can access the '/' page. Needs courses with a start date.
"""
def setUp(self):
super(AnonymousIndexPageTest, self).setUp()
self.factory = RequestFactory()
self.course = CourseFactory.create(
days_early_for_beta=5,
enrollment_start=datetime.datetime.now(UTC) + datetime.timedelta(days=3),
user_id=self.user.id,
)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_none_user_index_access_with_startdate_fails(self):
"""
This is a regression test for a bug where the incoming user is
anonymous and start dates are being checked. It replaces a previous
        test as it solves the issue in a different way.
"""
request = self.factory.get('/')
request.user = AnonymousUser()
mako_middleware_process_request(request)
student.views.index(request)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_anon_user_with_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
@override_settings(FEATURES=FEATURES_WO_STARTDATE)
def test_anon_user_no_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_allow_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the default setting is to ALLOW iframing
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'ALLOW')
@override_settings(X_FRAME_OPTIONS='DENY')
def test_deny_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the override value is honored
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'DENY')
def test_edge_redirect_to_login(self):
"""
Test edge homepage redirect to lms login.
"""
request = self.factory.get('/')
request.user = AnonymousUser()
# HTTP Host changed to edge.
request.META["HTTP_HOST"] = "edge.edx.org"
response = index(request)
# Response should be instance of HttpResponseRedirect.
self.assertIsInstance(response, HttpResponseRedirect)
# Location should be "/login".
self.assertEqual(response._headers.get("location")[1], "/login")
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class IndexPageCourseCardsSortingTests(ModuleStoreTestCase):
"""
Test for Index page course cards sorting
"""
def setUp(self):
super(IndexPageCourseCardsSortingTests, self).setUp()
self.starting_later = CourseFactory.create(
org='MITx',
number='1000',
display_name='Starting later, Announced later',
metadata={
'start': datetime.datetime.now(UTC) + datetime.timedelta(days=4),
'announcement': datetime.datetime.now(UTC) + datetime.timedelta(days=3),
}
)
self.starting_earlier = CourseFactory.create(
org='MITx',
number='1001',
display_name='Starting earlier, Announced earlier',
metadata={
'start': datetime.datetime.now(UTC) + datetime.timedelta(days=2),
'announcement': datetime.datetime.now(UTC) + datetime.timedelta(days=1),
}
)
self.course_with_default_start_date = CourseFactory.create(
org='MITx',
number='1002',
display_name='Tech Beta Course',
)
self.factory = RequestFactory()
@patch('student.views.render_to_response', RENDER_MOCK)
def test_course_cards_sorted_by_default_sorting(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args
self.assertEqual(template, 'index.html')
        # With default sorting, the courses are ordered by their announcement dates.
self.assertEqual(context['courses'][0].id, self.starting_later.id)
self.assertEqual(context['courses'][1].id, self.starting_earlier.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
@patch('student.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_SORTING_BY_START_DATE': True})
def test_course_cards_sorted_by_start_date_show_earliest_first(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args
self.assertEqual(template, 'index.html')
        # Now the courses will be sorted by their start dates, earliest first.
self.assertEqual(context['courses'][0].id, self.starting_earlier.id)
self.assertEqual(context['courses'][1].id, self.starting_later.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
|
dmckinney5/SlackOff | refs/heads/master | slackoff/lib/python2.7/site-packages/slackclient/__init__.py | 3 | from slackclient._client import SlackClient # noqa
|
RuiNascimento/krepo | refs/heads/master | script.module.lambdascrapers/lib/lambdascrapers/sources_ lambdascrapers/en/myprojectfreetv.py | 1 | # -*- coding: UTF-8 -*-
# -Cleaned and Checked on 10-10-2018 by JewBMX in Yoda.
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['my-project-free.tv']
self.base_link = 'https://my-project-free.tv' #https://www8.project-free-tv.ag
self.search_link = '/episode/%s-season-%s-episode-%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
clean_title = cleantitle.geturl(tvshowtitle)
url = clean_title
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url: return
tvshowtitle = url
url = self.base_link + self.search_link % (tvshowtitle, int(season), int(episode))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = client.request(url)
try:
data = re.compile("callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)",re.DOTALL).findall(r)
for http,host,url in data:
url = '%s://%s/%s' % (http,host,url)
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url,
'direct': False,
'debridonly': False
})
except:
pass
return sources
except Exception:
return
def resolve(self, url):
return url |
zigama/rapidsms-rwanda | refs/heads/master | apps/old.ubuzima/config.py | 4 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
title = "Child & Maternity Health"
tab_link = "/ubuzima"
|
Chenmxs/pyspider | refs/heads/master | tests/data_fetcher_processor_handler.py | 67 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2015-01-18 14:12:55
from pyspider.libs.base_handler import *
class Handler(BaseHandler):
@not_send_status
def not_send_status(self, response):
self.crawl('http://www.baidu.com/')
return response.text
def url_deduplicated(self, response):
self.crawl('http://www.baidu.com/')
self.crawl('http://www.google.com/')
self.crawl('http://www.baidu.com/')
self.crawl('http://www.google.com/')
self.crawl('http://www.google.com/')
@catch_status_code_error
def catch_http_error(self, response):
self.crawl('http://www.baidu.com/')
return response.status_code
def json(self, response):
return response.json
def html(self, response):
return response.doc('h1').text()
def links(self, response):
self.crawl([x.attr.href for x in response.doc('a').items()], callback=self.links)
def cookies(self, response):
return response.cookies
def get_save(self, response):
return response.save
def get_process_save(self, response):
return self.save
def set_process_save(self, response):
self.save['roy'] = 'binux'
class IgnoreHandler(BaseHandler):
pass
__handler_cls__ = Handler
|
epandurski/django | refs/heads/master | tests/user_commands/management/commands/leave_locale_alone_true.py | 428 | from django.core.management.base import BaseCommand
from django.utils import translation
class Command(BaseCommand):
can_import_settings = True
leave_locale_alone = True
def handle(self, *args, **options):
return translation.get_language()
|
tima/ansible | refs/heads/devel | lib/ansible/modules/network/bigswitch/bigmon_policy.py | 27 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to manage Big Monitoring Fabric service chains
# (c) 2016, Ted Elhourani <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigmon_policy
author: "Ted (@tedelhourani)"
short_description: Create and remove a bigmon out-of-band policy.
description:
- Create and remove a bigmon out-of-band policy.
version_added: "2.3"
options:
name:
description:
- The name of the policy.
required: true
policy_description:
description:
- Description of policy.
action:
description:
      - Forward matching packets to delivery interfaces, drop matching packets (measure the rate of
        matching packets but do not forward them to delivery interfaces), capture matching packets
        and write them to a PCAP file, or enable NetFlow generation.
    default: forward
    choices: ['forward', 'drop', 'capture', 'flow-gen']
priority:
description:
- A priority associated with this policy. The higher priority policy takes precedence over a lower priority.
default: 100
duration:
description:
- Run policy for duration duration or until delivery_packet_count packets are delivered, whichever comes first.
default: 0
start_time:
description:
- Date the policy becomes active
default: ansible_date_time.iso8601
delivery_packet_count:
description:
- Run policy until delivery_packet_count packets are delivered.
default: 0
state:
description:
- Whether the policy should be present or absent.
default: present
choices: ['present', 'absent']
controller:
description:
- The controller address.
required: true
validate_certs:
description:
- If C(false), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: true
choices: [true, false]
access_token:
description:
- Bigmon access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used.
'''
EXAMPLES = '''
- name: policy to aggregate filter and deliver data center (DC) 1 traffic
bigmon_policy:
name: policy1
policy_description: DC 1 traffic policy
action: drop
controller: '{{ inventory_hostname }}'
state: present
validate_certs: false
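# A second, illustrative play (all values are placeholders) that removes the same policy:
- name: remove policy1 when it is no longer needed
  bigmon_policy:
    name: policy1
    controller: '{{ inventory_hostname }}'
    state: absent
    validate_certs: false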
'''
RETURN = ''' # '''
import datetime
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.bigswitch.bigswitch import Rest
from ansible.module_utils._text import to_native
def policy(module):
try:
access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN']
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message, exception=traceback.format_exc())
name = module.params['name']
policy_description = module.params['policy_description']
action = module.params['action']
priority = module.params['priority']
duration = module.params['duration']
start_time = module.params['start_time']
delivery_packet_count = module.params['delivery_packet_count']
state = module.params['state']
controller = module.params['controller']
rest = Rest(module,
{'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token},
'https://' + controller + ':8443/api/v1/data/controller/applications/bigtap')
if name is None:
module.fail_json(msg='parameter `name` is missing')
response = rest.get('policy?config=true', data={})
if response.status_code != 200:
module.fail_json(msg="failed to obtain existing policy config: {}".format(response.json['description']))
config_present = False
matching = [policy for policy in response.json
if policy['name'] == name and
policy['duration'] == duration and
policy['delivery-packet-count'] == delivery_packet_count and
policy['policy-description'] == policy_description and
policy['action'] == action and
policy['priority'] == priority]
if matching:
config_present = True
    if state == 'present' and config_present:
        module.exit_json(changed=False)
    if state == 'absent' and not config_present:
        module.exit_json(changed=False)
    if state == 'present':
data = {'name': name, 'action': action, 'policy-description': policy_description,
'priority': priority, 'duration': duration, 'start-time': start_time,
'delivery-packet-count': delivery_packet_count}
response = rest.put('policy[name="%s"]' % name, data=data)
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error creating policy '{}': {}".format(name, response.json['description']))
    if state == 'absent':
response = rest.delete('policy[name="%s"]' % name, data={})
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error deleting policy '{}': {}".format(name, response.json['description']))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
policy_description=dict(type='str', default=''),
action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'),
priority=dict(type='int', default=100),
duration=dict(type='int', default=0),
start_time=dict(type='str', default=datetime.datetime.now().isoformat() + '+00:00'),
delivery_packet_count=dict(type='int', default=0),
controller=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present'),
validate_certs=dict(type='bool', default='True'),
access_token=dict(type='str', no_log=True)
)
)
try:
policy(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
ashishbaghudana/mthesis-ashish | refs/heads/develop | resources/tees/ExampleBuilders/FeatureBuilders/EVEXFeatureBuilder.py | 2 | """
EVEX Feature Builder
"""
__version__ = "$Revision: 1.5 $"
from FeatureBuilder import FeatureBuilder
class EVEXFeatureBuilder(FeatureBuilder):
def __init__(self, featureSet):
"""
This is called, when the ExampleBuilder object is created.
@type featureSet: Core.IdSet
@param featureSet: The feature ids
"""
FeatureBuilder.__init__(self, featureSet)
def initSentence(self, sentenceGraph):
"""
This function is called once for each sentence, before any calls to "buildFeatures". It
should be used to initialize per-sentence data structures.
@type sentenceGraph: Core.SentenceGraph
@param sentenceGraph: a SentenceGraph object providing access to the aligned semantic and syntactic
information of the sentence. The underlying XML can also be accessed through
this class.
"""
### Sentence initialization code here ###
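        # Illustrative sketch, kept as a comment so the stub stays a no-op:
        # a per-sentence cache (hypothetical name) could be created here, e.g.
        #     self.tokenFeatureCache = {}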
pass
def buildEdgeFeatures(self, entity1, entity2, token1, token2, path, sentenceGraph):
"""
This is the main-function for feature generation. It is called once for each
directed entity pair in the sentence.
For defining features, please use the member function "setFeature(self, name, value=1)",
derived from the parent class. This ensures features get correctly tagged, if needed.
@type entity1: cElementTree.Element
@param entity1: First entity of the candidate edge, an Interaction XML "entity"-element
@type entity2: cElementTree.Element
@param entity2: Second entity of the candidate edge, an Interaction XML "entity"-element
@type token1: cElementTree.Element
@param token1: The head token of entity1, an Interaction XML "token"-element
@type token2: cElementTree.Element
@param token2: The head token of entity2, an Interaction XML "token"-element
@type path: list of cElementTree.Elements (when "no_path" style is set, this is always [token1, token2])
@param path: the shortest connecting path of tokens (Interaction XML "token"-elements)
@type sentenceGraph: Core.SentenceGraph
@param sentenceGraph: a SentenceGraph object providing access to the aligned semantic and syntactic
information of the sentence. The underlying XML can also be accessed through
this class.
"""
### Feature generation code here ###
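        # An illustrative sketch of feature generation, kept as comments so this
        # stub remains inert. It assumes the Interaction XML token elements carry
        # a "POS" attribute and uses the setFeature helper described above:
        #     self.setFeature("evex_t1_pos_" + (token1.get("POS") or ""))
        #     self.setFeature("evex_t2_pos_" + (token2.get("POS") or ""))
        #     self.setFeature("evex_path_length_" + str(len(path)))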
pass
if __name__=="__main__":
"""
    The main function is the test program for the EVEX feature builder. It takes as a parameter an
Interaction XML corpus file, and builds edge-examples using MultiEdgeExampleBuilder. When the
"evex" style parameter is set, MultiEdgeExampleBuilder will call EVEXFeatureBuilder for feature
generation.
"""
import sys
sys.path.append("../..")
from Core.IdSet import IdSet
import Core.ExampleUtils as ExampleUtils
from ExampleBuilders.MultiEdgeExampleBuilder import MultiEdgeExampleBuilder
# Import Psyco if available
try:
import psyco
psyco.full()
print >> sys.stderr, "Found Psyco, using"
except ImportError:
print >> sys.stderr, "Psyco not installed"
from optparse import OptionParser
optparser = OptionParser(usage="%prog [options]\nTest EVEX Feature Builder.")
defaultInput = "/usr/share/biotext/BioNLP2011/data/main-tasks/GE/GE-devel-nodup.xml"
optparser.add_option("-i", "--input", default=defaultInput, dest="input", help="Corpus in analysis format", metavar="FILE")
optparser.add_option("-o", "--output", default="evex-examples.txt", dest="output", help="Output feature file")
optparser.add_option("-d", "--edgeIds", default="evex-ids", dest="edgeIds", help="Example class and feature id file stem (files = STEM.class_names and STEM.feature_names)")
optparser.add_option("-t", "--tokenization", default="split-mccc-preparsed", dest="tokenization", help="tokenization")
optparser.add_option("-p", "--parse", default="split-mccc-preparsed", dest="parse", help="parse")
optparser.add_option("-s", "--styles", default="typed,directed,no_path,no_task,no_dependency,no_linear,entities,genia_limits,noMasking,maxFeatures,evex", dest="edgeStyles", help="")
(options, args) = optparser.parse_args()
assert options.input != None
assert options.output != None
assert options.edgeIds != None
exampleBuilder = MultiEdgeExampleBuilder()
exampleBuilder.run(options.input, options.output, options.parse, options.tokenization, "style:"+options.edgeStyles, options.edgeIds)
|
gf712/AbPyTools | refs/heads/master | abpytools/utils/abpytools_exceptions.py | 1 | class NumberingException(Exception):
pass
|
googleapis/google-cloud-dotnet | refs/heads/master | apis/Google.Cloud.Functions.V1/synth.py | 353 | # GENERATED BY Google.Cloud.Tools.ProjectGenerator - DO NOT EDIT!
import json
import sys
from synthtool import shell
from synthtool import metadata
from pathlib import Path
# generateapis.sh updates synth.metadata itself
metadata.enable_write_metadata(False)
AUTOSYNTH_MULTIPLE_COMMITS = True
# Parent of the script is the API-specific directory
# Parent of the API-specific directory is the apis directory
# Parent of the apis directory is the repo root
root = Path(__file__).parent.parent.parent
package = Path(__file__).parent.name
bash = '/bin/bash'
if sys.platform == 'win32':
bash = 'C:\\Program Files\\Git\\bin\\bash.exe'
shell.run(
(bash, 'generateapis.sh', '--check_compatibility', package),
cwd = root,
hide_output = False)
|
PeterDaveHello/eden | refs/heads/master | modules/s3db/climate.py | 13 | # -*- coding: utf-8 -*-
""" Sahana Eden Climate Model
@copyright: 2011-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3ClimateModel",
"climate_first_run",
)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3ClimateModel(S3Model):
"""
Climate data is stored in dynamically created tables.
These tables can be added from the command line script add_table.py
in modules.ClimateDataPortal.
The table definitions are stored in climate_sample_table_spec.
        A datum is an observed value over a time quantum at a given place.
e.g. observed temperature in Kathmandu between Feb 2006 - April 2007
Places are currently points, i.e. lat/lon coordinates.
Places may be stations.
Places may have elevation or other optional information.
@ToDo: i18n
@ToDo: Deprecate raw SQL (Tested only on PostgreSQL)
"""
names = ("climate_place",
"climate_place_elevation",
"climate_place_station_name",
"climate_place_station_id",
"climate_sample_table_spec",
"climate_monthly_aggregation",
"climate_station_parameter",
"climate_prices",
"climate_purchase",
"climate_save_query",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
NONE = current.messages["NONE"]
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Climate Place
#
# This resource is spread over 4 tables, which we assume are linked by
# common IDs
#
# @ToDo: Migrate to gis_location?
# Although this table has many fields unused so a performance hit?
# elevation is not included as it would just mean a performance hit
# when we are generating 2D maps without elevation info.
define_table("climate_place",
Field("longitude", "double",
notnull=True,
required=True,
),
Field("latitude", "double",
notnull=True,
required=True,
)
)
# ---------------------------------------------------------------------
# elevation may not be useful for future projects
# e.g. where not available, or sea-based stations
# also, elevation may be supplied for gridded data
define_table("climate_place_elevation",
Field("elevation_metres", "double",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
# not all places are stations with elevations
# as in the case of "gridded" data
# a station can only be in one place
define_table("climate_place_station_name",
Field("name", "double",
notnull=True,
required=True,
),
)
        station_id = S3ReusableField("station_id", "reference climate_place_station_name",
sortby="name",
requires = IS_ONE_OF(db,
"climate_place_station_name.id",
climate_station_represent,
orderby="climate_place_station_name.name",
sort=True
),
represent = climate_station_represent,
label = "Station",
ondelete = "RESTRICT"
)
# ---------------------------------------------------------------------
# station id may not be useful or even meaningful
# e.g. gridded data has no stations.
# this is passive data so ok to store separately
define_table("climate_place_station_id",
Field("station_id", "integer",
notnull=True,
required=True,
),
)
# ---------------------------------------------------------------------
# coefficient of variance is meaningless for degrees C but Ok for Kelvin
# internally all scales must be ratio scales if coefficient
# of variations is to be allowed, (which it is)
# rainfall (mm), temp (K) are ok
# output units
define_table("climate_sample_table_spec",
Field("name",
notnull=True,
required=True,
),
Field("sample_type_code",
length = 1,
notnull = True,
# web2py requires a default value for not null fields
default = "",
required = True
),
Field("field_type",
notnull=True,
required=True,
),
Field("units",
notnull=True,
required=True,
),
Field("date_mapping",
default="",
notnull=True,
required=True
),
Field("grid_size", "double",
default = 0,
notnull = True,
required = True
)
)
        parameter_id = S3ReusableField("parameter_id", "reference climate_sample_table_spec",
sortby="name",
requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
sort=True
),
represent = sample_table_spec_represent,
label = "Parameter",
ondelete = "RESTRICT"
)
# ---------------------------------------------------------------------
define_table("climate_monthly_aggregation",
Field("sample_table_id",
db.climate_sample_table_spec,
notnull = True,
required = True
),
# this maps to the name of a python class
# that deals with the monthly aggregated data.
Field("aggregation",
notnull=True,
required=True,
)
)
# ---------------------------------------------------------------------
# Station Parameters
#
tablename = "climate_station_parameter"
define_table(tablename,
station_id(),
parameter_id(requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
sort=True
),
),
Field.Method("range_from",
climate_station_parameter_range_from),
Field.Method("range_to",
climate_station_parameter_range_to),
)
ADD = T("Add new Station Parameter")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Station Parameter Details"),
title_list = T("Station Parameters"),
title_update = T("Edit Station Parameter"),
label_list_button = T("List Station Parameters"),
label_delete_button = T("Remove Station Parameter"),
msg_record_created = T("Station Parameter added"),
msg_record_modified = T("Station Parameter updated"),
msg_record_deleted = T("Station Parameter removed"),
msg_list_empty = T("No Station Parameters"))
configure(tablename,
insertable = False,
list_fields = [
"station_id",
"parameter_id",
(T("Range From"), "range_from"),
(T("Range To"), "range_to"),
]
)
# =====================================================================
# Purchase Data
#
nationality_opts = {
1:"Nepali Student",
2:"Others"
}
tablename = "climate_prices"
define_table(tablename,
Field("category", "integer",
label = T("Category"),
requires = IS_IN_SET(nationality_opts),
represent = lambda id: nationality_opts.get(id, NONE),
notnull = True,
required = True
),
parameter_id(
requires = IS_ONE_OF(db,
"climate_sample_table_spec.id",
sample_table_spec_represent,
filterby = "sample_type_code",
filter_opts = ("O",),
sort=True
),
notnull = True,
required = True,
represent = sample_table_spec_represent
),
Field("nrs_per_datum", "double",
label = T("NRs per datum"),
notnull = True,
required = True
)
)
configure(tablename,
create_onvalidation = self.climate_price_create_onvalidation,
list_fields=[
"category",
"parameter_id",
"nrs_per_datum"
]
)
ADD = T("Add new Dataset Price")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Dataset Price Details"),
title_list = T("Dataset Prices"),
title_update = T("Edit Dataset Price"),
label_list_button = T("List Dataset Prices"),
label_delete_button = T("Remove Dataset Price"),
msg_record_created = T("Dataset Price added"),
msg_record_modified = T("Dataset Price updated"),
msg_record_deleted = T("Dataset Price removed"),
msg_list_empty = T("No Dataset Prices"))
tablename = "climate_purchase"
define_table(tablename,
#user_id(),
#Field("sample_type_code",
# "string",
# requires = IS_IN_SET(sample_type_code_opts),
# represent = lambda code: ClimateDataPortal.sample_table_types_by_code[code]
#),
Field("parameter_id", "integer",
requires = IS_ONE_OF(db,
"climate_prices.parameter_id",
sample_table_spec_represent,
),
represent = sample_table_spec_represent,
label = "Parameter",
ondelete = "RESTRICT"
),
station_id(),
s3_date("date_from",
default = "now",
empty=False
),
s3_date("date_to",
default = "now",
empty=False
),
Field("nationality", "integer",
label = T("Category"),
requires = IS_IN_SET(nationality_opts),
represent = lambda id: nationality_opts.get(id, NONE),
required = True
),
Field("notes", "text",
label = T("Receipt number / Student ID / other notes")
),
Field("price"),
Field("paid", "boolean",
represent = lambda opt: \
opt and "Yes" or "No",
),
Field("i_agree_to_the_terms_and_conditions", "boolean",
required = True,
represent = lambda agrees: agrees and "Yes" or "No",
comment = DIV(_class="stickytip",
_title="%s|%s" % (
T("Important"),
T("Check this box when you have read, "
"understand and agree to the "
"<a href='terms' target='_blank'>"
"terms and conditions"
"</a>."
)
)
)
),
*s3_meta_fields()
)
# @todo: make lazy_table
table = db[tablename]
table.owned_by_user.label = T("User")
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
if not auth.s3_has_role(ADMIN):
table.paid.writable = False
ADD = T("Purchase New Data")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Purchased Data Details"),
title_list = T("All Purchased Data"),
title_update = T("Edit Purchased Data"),
label_list_button = T("List Dataset Prices"),
label_delete_button = T("Remove Purchased Data"),
msg_record_created = T("Data Purchase In Process"),
msg_record_modified = T("Purchased Data updated"),
msg_record_deleted = T("Purchased Data removed"),
msg_list_empty = T("No Data Purchased"))
configure(tablename,
onaccept = self.climate_purchase_onaccept,
create_next = URL(args = ["[id]", "read"]),
list_fields=[
"owned_by_user",
"parameter_id",
"station_id",
"date_from",
"date_to",
"nationality",
#"purpose",
"price",
"paid",
"i_agree_to_terms_and_conditions"
]
)
# =====================================================================
# Saved Queries
#
tablename = "climate_save_query"
define_table(tablename,
#user_id(),
Field("description"),
Field("query_definition", "text"),
)
ADD = T("Save Query")
crud_strings[tablename] = Storage(
label_create = ADD,
title_display = T("Saved Query Details"),
title_list = T("Saved Queries"),
title_update = T("Edit Saved Query"),
label_list_button = T("List Saved Queries"),
label_delete_button = T("Remove Saved Query"),
msg_record_created = T("Query Saved"),
msg_record_modified = T("Saved Query updated"),
msg_record_deleted = T("Saved Query removed"),
msg_list_empty = T("No Saved Queries"))
configure(tablename,
listadd = False)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return Storage()
# -------------------------------------------------------------------------
@staticmethod
def climate_price_create_onvalidation(form):
"""
"""
vars = form.request_vars
db = current.db
table = db.climate_prices
query = (table.category == vars["category"]) & \
(table.parameter_id == vars["parameter_id"])
price = db(query).select(table.id,
limitby=(0, 1)).first()
if price is not None:
form.errors["nrs_per_datum"] = [
"There is a conflicting price for the above category and parameter."
]
return False
else:
return True
# -------------------------------------------------------------------------
@staticmethod
def climate_purchase_onaccept(form):
"""
Calculate Price
"""
import ClimateDataPortal
vars = form.vars
id = vars.id
db = current.db
ptable = db.climate_purchase
purchase = db(ptable.id == id).select(ptable.paid,
limitby=(0, 1)).first()
if (purchase and purchase.paid == True):
pass
else:
parameter_id = vars.parameter_id
table = db.climate_sample_table_spec
query = (table.id == parameter_id)
parameter_table = db(query).select(table.id,
table.date_mapping,
limitby=(0, 1)).first()
parameter_table_id = parameter_table.id
date_mapping_name = parameter_table.date_mapping
period = date_mapping_name
date_from = vars.date_from
date_to = vars.date_to
nationality = int(vars.nationality)
table = db.climate_prices
query = (table.category == nationality) & \
(table.parameter_id == parameter_id)
price_row = db(query).select(table.nrs_per_datum,
limitby=(0, 1)).first()
if price_row is None:
form.errors["price"] = ["There is no price set for this data"]
else:
price = price_row.nrs_per_datum
currency = {
1: "%.2f NRs",
2: "US$ %.2f"
}[nationality]
date_mapping = getattr(ClimateDataPortal, date_mapping_name)
start_date_number = date_mapping.date_to_time_period(date_from)
end_date_number = date_mapping.date_to_time_period(date_to)
place_id = int(vars.station_id)
datum_count = db.executesql(
"SELECT COUNT(*) "
"FROM climate_sample_table_%(parameter_table_id)i "
"WHERE place_id = %(place_id)i "
"AND time_period >= %(start_date_number)i "
"AND time_period <= %(end_date_number)i;" % locals()
)[0][0]
ptable[id] = {"price": currency % (datum_count * price)}
# =============================================================================
def climate_station_represent(id, row=None):
"""
"""
if row:
id = row.id
    db = current.db
    s3db = current.s3db
table = s3db.climate_place_station_id
row_id = db(table.id == id).select(table.station_id,
limitby=(0,1)).first()
table = s3db.climate_place_station_name
row_name = db(table.id == id).select(table.name,
limitby=(0,1)).first()
if row_id and row_id.station_id:
represent = " (%s)" % row_id.station_id
else:
represent = ""
if row_name and row_name.name:
represent = "%s%s" % (row_name.name, represent)
return represent or current.messages["NONE"]
# =============================================================================
def sample_table_spec_represent(id, row=None):
"""
"""
if row:
id = row.id
import ClimateDataPortal
table = current.s3db.climate_sample_table_spec
row = current.db(table.id == id).select(table.name,
table.sample_type_code,
limitby=(0, 1)).first()
if row:
return "%s %s" % (
ClimateDataPortal.sample_table_types_by_code[row.sample_type_code].__name__,
row.name
)
else:
return current.messages["NONE"]
# =============================================================================
def climate_station_parameter_range_from(row):
default = current.messages["NONE"]
if hasattr(row, "climate_station_parameter"):
row = row.climate_station_parameter
try:
parameter_id = row.parameter_id
station_id = row.station_id
except AttributeError:
return default
table = current.s3db.table("climate_sample_table_%s" % parameter_id)
if not table:
return default
date = table.time_period.min()
    row = current.db(table.place_id == station_id).select(date).first()
if row:
date = row[date]
import ClimateDataPortal
year, month = ClimateDataPortal.month_number_to_year_month(date)
return "%s-%s" % (month, year)
else:
return default
# -------------------------------------------------------------------------
def climate_station_parameter_range_to(row):
default = current.messages["NONE"]
if hasattr(row, "climate_station_parameter"):
row = row.climate_station_parameter
try:
parameter_id = row.parameter_id
station_id = row.station_id
except AttributeError:
return default
table = current.s3db.table("climate_sample_table_%s" % parameter_id)
if not table:
return default
date = table.time_period.max()
    row = current.db(table.place_id == station_id).select(date).first()
if row:
date = row[date]
import ClimateDataPortal
year, month = ClimateDataPortal.month_number_to_year_month(date)
return "%s-%s" % (month, year)
else:
return default
# =============================================================================
def climate_first_run():
"""
Called from zzz_1st_run.py
Manual SQL Statements to run after tables are created
"""
errors = []
settings = current.deployment_settings
if settings.get_database_type() != "postgres":
errors.append("Climate unresolved dependency: PostgreSQL required")
try:
import rpy2
except ImportError:
errors.append("""
R is required by the climate data portal to generate charts
To install R: refer to:
http://cran.r-project.org/doc/manuals/R-admin.html
rpy2 is required to interact with python.
To install rpy2, refer to:
http://rpy.sourceforge.net/rpy2/doc-dev/html/overview.html
""")
try:
from Scientific.IO import NetCDF
except ImportError:
errors.append("Climate unresolved dependency: NetCDF required if you want to import readings")
try:
from scipy import stats
except ImportError:
errors.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")
if errors:
# Report errors and stop.
prefix = "\n%s: " % current.T("ACTION REQUIRED")
msg = prefix + prefix.join(errors)
current.log.critical(msg)
raise HTTP(500, body=msg)
db = current.db
# Load all stations and parameters
s3db = current.s3db
ptable = s3db.climate_station_parameter
if not db(ptable.id > 0).select(ptable.id,
limitby=(0, 1)):
table = s3db.climate_place_station_name
station_rows = db(table.id > 0).select(table.id)
table = db.climate_sample_table_spec
query = (table.sample_type_code == "O")
for station_row in station_rows:
parameter_rows = db(query).select(table.id)
for parameter_row in parameter_rows:
ptable.insert(
station_id = station_row.id,
parameter_id = parameter_row.id
)
db.executesql(
"ALTER TABLE climate_sample_table_spec"
"ADD CONSTRAINT climate_sample_table_name_sample_type_unique"
"UNIQUE (name, sample_type_code);"
"ALTER TABLE climate_prices"
"ADD CONSTRAINT climate_price_unique"
"UNIQUE (category, parameter_id);"
)
db.commit()
# END =========================================================================
|
ossdemura/django-miniblog | refs/heads/dev | Scripts/viewer.py | 1 | #!c:\users\juan.digicash\documents\workspace-django\miblog\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="white")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
yakky/django | refs/heads/master | django/core/wsgi.py | 161 | import django
from django.core.handlers.wsgi import WSGIHandler
def get_wsgi_application():
"""
The public interface to Django's WSGI support. Should return a WSGI
callable.
Allows us to avoid making django.core.handlers.WSGIHandler public API, in
case the internal WSGI implementation changes or moves in the future.
"""
django.setup(set_prefix=False)
return WSGIHandler()
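# Illustrative sketch (added for clarity, not part of Django itself): a
# project-level wsgi.py typically consumes the helper above roughly as
# follows; the settings module name "mysite.settings" is a hypothetical
# placeholder.
#
#     import os
#     from django.core.wsgi import get_wsgi_application
#
#     os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
#     application = get_wsgi_application()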
|
arrayexpress/ae_auto | refs/heads/master | utils/maintenance/__init__.py | 34 | __author__ = 'Ahmed G. Ali'
|
breathe/ansible | refs/heads/devel | examples/scripts/uptime.py | 278 | #!/usr/bin/python
# (c) 2012, Michael DeHaan <[email protected]>
# example of getting the uptime of all hosts, 10 at a time
import ansible.runner
import sys
# construct the ansible runner and execute on all hosts
results = ansible.runner.Runner(
pattern='*', forks=10,
module_name='command', module_args='/usr/bin/uptime',
).run()
if results is None:
print "No hosts found"
sys.exit(1)
print "UP ***********"
for (hostname, result) in results['contacted'].items():
if not 'failed' in result:
print "%s >>> %s" % (hostname, result['stdout'])
print "FAILED *******"
for (hostname, result) in results['contacted'].items():
if 'failed' in result:
print "%s >>> %s" % (hostname, result['msg'])
print "DOWN *********"
for (hostname, result) in results['dark'].items():
print "%s >>> %s" % (hostname, result)
|
mohamedattahri/python-docx | refs/heads/master | features/steps/image.py | 10 | # encoding: utf-8
"""
Step implementations for image characterization features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx.image.image import Image
from helpers import test_file
# given ===================================================
@given('the image file \'{filename}\'')
def given_image_filename(context, filename):
context.image_path = test_file(filename)
# when ====================================================
@when('I construct an image using the image path')
def when_construct_image_using_path(context):
context.image = Image.from_file(context.image_path)
# then ====================================================
@then('the image has content type \'{mime_type}\'')
def then_image_has_content_type(context, mime_type):
content_type = context.image.content_type
assert content_type == mime_type, (
"expected MIME type '%s', got '%s'" % (mime_type, content_type)
)
@then('the image has {horz_dpi_str} horizontal dpi')
def then_image_has_horizontal_dpi(context, horz_dpi_str):
expected_horz_dpi = int(horz_dpi_str)
horz_dpi = context.image.horz_dpi
assert horz_dpi == expected_horz_dpi, (
"expected horizontal dpi %d, got %d" % (expected_horz_dpi, horz_dpi)
)
@then('the image has {vert_dpi_str} vertical dpi')
def then_image_has_vertical_dpi(context, vert_dpi_str):
expected_vert_dpi = int(vert_dpi_str)
vert_dpi = context.image.vert_dpi
assert vert_dpi == expected_vert_dpi, (
"expected vertical dpi %d, got %d" % (expected_vert_dpi, vert_dpi)
)
@then('the image is {px_height_str} pixels high')
def then_image_is_cx_pixels_high(context, px_height_str):
expected_px_height = int(px_height_str)
px_height = context.image.px_height
assert px_height == expected_px_height, (
"expected pixel height %d, got %d" % (expected_px_height, px_height)
)
@then('the image is {px_width_str} pixels wide')
def then_image_is_cx_pixels_wide(context, px_width_str):
expected_px_width = int(px_width_str)
px_width = context.image.px_width
assert px_width == expected_px_width, (
"expected pixel width %d, got %d" % (expected_px_width, px_width)
)
|
DIT-Tools/drqueue | refs/heads/master | etc/lightwave_sg.py | 4 | #
# THIS IS A PYTHON SCRIPT FILE
#
# Default configuration for Lightwave script generator
#
# Python variables
# SCENE, PROJECTDIR, CONFIGDIR, RF_OWNER, FFORMAT, RESX, RESY, CAMERA
#
# shell variables
# DRQUEUE_BLOCKSIZE, DRQUEUE_COMPID, DRQUEUE_ENDFRAME, DRQUEUE_ETC, DRQUEUE_FRAME,
# DRQUEUE_JOBID, DRQUEUE_JOBNAME, DRQUEUE_OS, DRQUEUE_OWNER, DRQUEUE_PADFRAME,
# DRQUEUE_PADFRAMES, DRQUEUE_STARTFRAME, DRQUEUE_STEPFRAME
#
#
# For platform dependent environment settings a form like this
# can be used :
#
# if DRQUEUE_OS == "LINUX":
# # Environment for Linux
# elif DRQUEUE_OS == "IRIX":
# # Environment for Irix
# else:
# # Some error messages
#
import os,signal,subprocess,sys
os.umask(0)
# fetch DrQueue environment
DRQUEUE_BLOCKSIZE = int(os.getenv("DRQUEUE_BLOCKSIZE"))
DRQUEUE_COMPID = int(os.getenv("DRQUEUE_COMPID"))
DRQUEUE_ENDFRAME = int(os.getenv("DRQUEUE_ENDFRAME"))
DRQUEUE_ETC = os.getenv("DRQUEUE_ETC")
DRQUEUE_FRAME = int(os.getenv("DRQUEUE_FRAME"))
DRQUEUE_JOBID = int(os.getenv("DRQUEUE_JOBID"))
DRQUEUE_JOBNAME = os.getenv("DRQUEUE_JOBNAME")
DRQUEUE_OS = os.getenv("DRQUEUE_OS")
DRQUEUE_OWNER = os.getenv("DRQUEUE_OWNER")
DRQUEUE_PADFRAME = int(os.getenv("DRQUEUE_PADFRAME"))
DRQUEUE_PADFRAMES = int(os.getenv("DRQUEUE_PADFRAMES"))
DRQUEUE_STARTFRAME = int(os.getenv("DRQUEUE_STARTFRAME"))
DRQUEUE_STEPFRAME = int(os.getenv("DRQUEUE_STEPFRAME"))
if DRQUEUE_OS == "WINDOWS":
# convert to windows path with drive letter
SCENE = subprocess.Popen(["cygpath.exe", "-w "+SCENE], stdout=subprocess.PIPE).communicate()[0]
PROJECTDIR = subprocess.Popen(["cygpath.exe", "-w "+PROJECTDIR], stdout=subprocess.PIPE).communicate()[0]
CONFIGDIR = subprocess.Popen(["cygpath.exe", "-w "+CONFIGDIR], stdout=subprocess.PIPE).communicate()[0]
BLOCK = DRQUEUE_FRAME + DRQUEUE_BLOCKSIZE - 1
if BLOCK > DRQUEUE_ENDFRAME:
BLOCK = DRQUEUE_ENDFRAME
ENGINE_PATH="lwsn"
command = ENGINE_PATH+" -3 -c "+CONFIGDIR+" -d "+PROJECTDIR+" -q "+SCENE+" "+str(DRQUEUE_FRAME)+" "+str(BLOCK)+" "+str(DRQUEUE_STEPFRAME)
print(command)
sys.stdout.flush()
p = subprocess.Popen(command, shell=True)
sts = os.waitpid(p.pid, 0)
# This should requeue the frame if failed
if sts[1] != 0:
print("Requeueing frame...")
os.kill(os.getppid(), signal.SIGINT)
exit(1)
else:
#if DRQUEUE_OS != "WINDOWS" then:
# The frame was rendered properly
# We don't know the output image name. If we knew we could set this correctly
# chown_block RF_OWNER RD/IMAGE DRQUEUE_FRAME BLOCK
# change userid and groupid
#chown 1002:1004 $SCENE:h/*
print("Finished.")
#
# Notice that the exit code of the last command is received by DrQueue
#
|
akhilaananthram/nupic | refs/heads/master | tests/unit/nupic/utils_test.py | 7 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for utils module."""
import pickle
import tempfile
import unittest
from nupic.utils import MovingAverage
# Import capnp to force import hook
import capnp
from nupic.movingaverage_capnp import MovingAverageProto
class UtilsTest(unittest.TestCase):
"""testing common.utils"""
def testMovingAverage(self):
"""
Test that the (internal) moving average maintains the averages correctly,
even for null initial condition and when the number of values goes over
windowSize. Pass in integers and floats.
"""
historicalValues = []
total = 0
windowSize = 3
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, 3, windowSize)
)
self.assertEqual(newAverage, 3.0)
self.assertEqual(historicalValues, [3.0])
self.assertEqual(total, 3.0)
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, 4, windowSize)
)
self.assertEqual(newAverage, 3.5)
self.assertListEqual(historicalValues, [3.0, 4.0])
self.assertEqual(total, 7.0)
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, 5.0, windowSize)
)
self.assertEqual(newAverage, 4.0)
self.assertListEqual(historicalValues, [3.0, 4.0, 5.0])
self.assertEqual(total, 12.0)
# Ensure the first value gets popped
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, 6.0, windowSize)
)
self.assertEqual(newAverage, 5.0)
self.assertListEqual(historicalValues, [4.0, 5.0, 6.0])
self.assertEqual(total, 15.0)
def testMovingAverageInstance(self):
"""
Test that the (internal) moving average maintains the averages correctly,
even for null initial condition and when the number of values goes over
windowSize. Pass in integers and floats.
This is for the instance method next().
"""
ma = MovingAverage(windowSize=3)
newAverage = ma.next(3)
self.assertEqual(newAverage, 3.0)
self.assertListEqual(ma.getSlidingWindow(), [3.0])
self.assertEqual(ma.total, 3.0)
newAverage = ma.next(4)
self.assertEqual(newAverage, 3.5)
self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0])
self.assertEqual(ma.total, 7.0)
newAverage = ma.next(5)
self.assertEqual(newAverage, 4.0)
self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])
self.assertEqual(ma.total, 12.0)
# Ensure the first value gets popped
newAverage = ma.next(6)
self.assertEqual(newAverage, 5.0)
self.assertListEqual(ma.getSlidingWindow(), [4.0, 5.0, 6.0])
self.assertEqual(ma.total, 15.0)
def testMovingAverageSlidingWindowInit(self):
"""
Test the slidingWindow value is correctly assigned when initializing a
new MovingAverage object.
"""
# With existing historical values; same values as tested in testMovingAverage()
ma = MovingAverage(windowSize=3, existingHistoricalValues=[3.0, 4.0, 5.0])
self.assertListEqual(ma.getSlidingWindow(), [3.0, 4.0, 5.0])
# Without existing historical values
ma = MovingAverage(windowSize=3)
self.assertListEqual(ma.getSlidingWindow(), [])
def testMovingAverageReadWrite(self):
ma = MovingAverage(windowSize=3)
ma.next(3)
ma.next(4)
ma.next(5)
proto1 = MovingAverageProto.new_message()
ma.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = MovingAverageProto.read(f)
resurrectedMa = MovingAverage.read(proto2)
newAverage = ma.next(6)
self.assertEqual(newAverage, resurrectedMa.next(6))
self.assertListEqual(ma.getSlidingWindow(),
resurrectedMa.getSlidingWindow())
self.assertEqual(ma.total, resurrectedMa.total)
self.assertEqual(ma, resurrectedMa) # using the __eq__ method
def testSerialization(self):
"""serialization using pickle"""
ma = MovingAverage(windowSize=3)
ma.next(3)
ma.next(4)
ma.next(5)
stored = pickle.dumps(ma)
restored = pickle.loads(stored)
self.assertEqual(restored, ma)
self.assertEqual(ma.next(6), restored.next(6))
def testEquals(self):
ma = MovingAverage(windowSize=3)
maP = MovingAverage(windowSize=3)
self.assertEqual(ma, maP)
maN = MovingAverage(windowSize=10)
self.assertNotEqual(ma, maN)
ma = MovingAverage(windowSize=2, existingHistoricalValues=[3.0, 4.0, 5.0])
maP = MovingAverage(windowSize=2, existingHistoricalValues=[3.0, 4.0, 5.0])
self.assertEqual(ma, maP)
maP.next(6)
self.assertNotEqual(ma, maP)
ma.next(6)
self.assertEqual(ma, maP)
if __name__ == "__main__":
unittest.main()
|
lichengshuang/createvhost | refs/heads/master | python/others/System/Threads/thread-alt.py | 2 | #!/usr/bin/env python3
import _thread
def action(i):
print(i ** 32)
class Power:
def __init__(self, i):
self.i = i
def action(self):
print(self.i ** 32)
_thread.start_new_thread(action, (2,))
_thread.start_new_thread((lambda: action(2)), ())
obj = Power(2)
_thread.start_new_thread(obj.action, ())
|
madduck/reclass | refs/heads/master | reclass/datatypes/entity.py | 4 | #
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright © 2007–14 martin f. krafft <[email protected]>
# Released under the terms of the Artistic Licence 2.0
#
from classes import Classes
from applications import Applications
from parameters import Parameters
class Entity(object):
'''
A collection of Classes, Parameters, and Applications, mainly as a wrapper
for merging. The name and uri of an Entity will be updated to the name and
uri of the Entity that is being merged.
'''
def __init__(self, classes=None, applications=None, parameters=None,
uri=None, name=None, environment=None):
if classes is None: classes = Classes()
self._set_classes(classes)
if applications is None: applications = Applications()
self._set_applications(applications)
if parameters is None: parameters = Parameters()
self._set_parameters(parameters)
self._uri = uri or ''
self._name = name or ''
self._environment = environment or ''
name = property(lambda s: s._name)
uri = property(lambda s: s._uri)
environment = property(lambda s: s._environment)
classes = property(lambda s: s._classes)
applications = property(lambda s: s._applications)
parameters = property(lambda s: s._parameters)
def _set_classes(self, classes):
if not isinstance(classes, Classes):
raise TypeError('Entity.classes cannot be set to '\
'instance of type %s' % type(classes))
self._classes = classes
def _set_applications(self, applications):
if not isinstance(applications, Applications):
raise TypeError('Entity.applications cannot be set to '\
'instance of type %s' % type(applications))
self._applications = applications
def _set_parameters(self, parameters):
if not isinstance(parameters, Parameters):
raise TypeError('Entity.parameters cannot be set to '\
'instance of type %s' % type(parameters))
self._parameters = parameters
def merge(self, other):
self._classes.merge_unique(other._classes)
self._applications.merge_unique(other._applications)
self._parameters.merge(other._parameters)
self._name = other.name
self._uri = other.uri
self._environment = other.environment
def interpolate(self):
self._parameters.interpolate()
def __eq__(self, other):
return isinstance(other, type(self)) \
and self._applications == other._applications \
and self._classes == other._classes \
and self._parameters == other._parameters \
and self._name == other._name \
and self._uri == other._uri
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r, %r, uri=%r, name=%r)" % (self.__class__.__name__,
self.classes,
self.applications,
self.parameters,
self.uri,
self.name)
def as_dict(self):
return {'classes': self._classes.as_list(),
'applications': self._applications.as_list(),
'parameters': self._parameters.as_dict(),
'environment': self._environment
}
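# Illustrative sketch (added for clarity, not part of reclass): a hedged,
# minimal example of how merge() behaves, using only the Entity API defined
# above; the names and URIs are hypothetical placeholders.
def _example_entity_merge():
    # Never called by reclass itself; documentation only.
    base = Entity(name='base', uri='yaml_fs:///base.yml')
    node = Entity(name='node1', uri='yaml_fs:///node1.yml')
    # merge() folds node into base: classes and applications are merged
    # uniquely, parameters are deep-merged, and base takes over node's
    # name and uri, as the class docstring describes.
    base.merge(node)
    assert base.name == 'node1' and base.uri == 'yaml_fs:///node1.yml'
    return base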
|
emijrp/youtube-dl | refs/heads/master | youtube_dl/extractor/dctp.py | 124 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
class DctpTvIE(InfoExtractor):
_VALID_URL = r'http://www.dctp.tv/(#/)?filme/(?P<id>.+?)/$'
_TEST = {
'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
'info_dict': {
'id': '1324',
'display_id': 'videoinstallation-fuer-eine-kaufhausfassade',
'ext': 'flv',
'title': 'Videoinstallation für eine Kaufhausfassade'
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
base_url = 'http://dctp-ivms2-restapi.s3.amazonaws.com/'
version_json = self._download_json(
base_url + 'version.json',
video_id, note='Determining file version')
version = version_json['version_name']
info_json = self._download_json(
'{0}{1}/restapi/slugs/{2}.json'.format(base_url, version, video_id),
video_id, note='Fetching object ID')
object_id = compat_str(info_json['object_id'])
meta_json = self._download_json(
'{0}{1}/restapi/media/{2}.json'.format(base_url, version, object_id),
video_id, note='Downloading metadata')
uuid = meta_json['uuid']
title = meta_json['title']
wide = meta_json['is_wide']
if wide:
ratio = '16x9'
else:
ratio = '4x3'
play_path = 'mp4:{0}_dctp_0500_{1}.m4v'.format(uuid, ratio)
servers_json = self._download_json(
'http://www.dctp.tv/streaming_servers/',
video_id, note='Downloading server list')
url = servers_json[0]['endpoint']
return {
'id': object_id,
'title': title,
'format': 'rtmp',
'url': url,
'play_path': play_path,
'rtmp_real_time': True,
'ext': 'flv',
'display_id': video_id
}
|
TAlonglong/trollduction-test | refs/heads/develop-ws2016 | trollduction/tests/test_trollduction.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Martin Raspaud
# Author(s):
# Martin Raspaud <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test trollduction.py
"""
import unittest
xmlstuff = """<?xml version="1.0" encoding='utf-8'?>
<?xml-stylesheet type="text/xsl" href="prodlist2.xsl"?>
<!-- This config is used by Trollduction.-->
<product_config>
<metadata>
<platform>noaa</platform>
<number>15</number>
</metadata>
<common>
<output_dir>/tmp</output_dir>
</common>
<variables>
<path id="local_sir">/local_disk/data/sir</path>
<path id="sir">/local_disk/data/out/sir</path>
<path id="rgb">/local_disk/data/out/rgb</path>
<path id="tmp">/tmp</path>
</variables>
<product_list>
<!-- dump to netcdf -->
<!-- calibrated, satellite projection -->
<dump>
<file format="netcdf4">{time:%Y%m%d_%H%M}_{platform}{satnumber}.nc</file>
</dump>
<area id="eurol" name="Europe_large">
<!-- Generate the product only if sun is above the horizon at the
defined longitude/latitude -->
<product id="overview" name="overview" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file output_dir="tmp">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="natural" name="dnc" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="green_snow" name="green_snow" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="red_snow" name="red_snow" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="cloudtop" name="cloudtop">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<!-- Generate only if the Sun is below the horizon -->
<product id="night_overview" name="night_overview" sunzen_night_minimum="90" sunzen_lonlat="25, 60">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="night_fog" name="night_fog" sunzen_night_minimum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
</area>
</product_list>
</product_config>
"""
msg = 'pytroll://AAPP-HRPT/1b/norrköping/utv/polar/direct_readout/ file [email protected] 2014-10-08T11:06:36.185553 v1.01 application/json {"satellite": "NOAA 19", "format": "AAPP-HRPT", "start_time": "2014-10-08T10:50:37.848000", "level": "1b", "orbit_number": "29197", "uri": "ssh://c20035.ad.smhi.se//local_disk/data/satellite/polar/noaa19_20100224_1129_05402/hrpt_noaa19_20100224_1129_05402.l1b", "filename": "hrpt_noaa19_20100224_1129_05402.l1", "instrument": "avhrr", "end_time": "2014-10-08T11:04:37.848000", "type": "Binary"}'
from StringIO import StringIO
from posttroll.message import Message
from mock import MagicMock, patch
import time
from datetime import datetime
class TestDataProcessor(unittest.TestCase):
def setUp(self):
self.mock = MagicMock()
self.module_patcher = patch.dict('sys.modules', {'netCDF4': self.mock})
self.module_patcher.start()
def tearDown(self):
self.module_patcher.stop()
@patch('mpop.satout.cfscene.CFScene')
@patch('trollsift.Parser')
@patch('mpop.satellites.GenericFactory')
def test_run(self, GF, parser, cfs):
pass
# from trollduction.producer import DataProcessor
# from trollduction.xml_read import ProductList
# pconfig = ProductList(StringIO(xmlstuff))
# dproc = DataProcessor()
# dproc.writer.stop()
# time.sleep(1)
# dproc.writer = MagicMock()
# dproc.draw_images = MagicMock()
# dproc.run(pconfig, Message(rawstr=msg))
# GF.create_scene.assert_called_once_with(instrument='avhrr',
# satname='noaa',
# variant='',
# time_slot=datetime(
# 2014, 10, 8, 10, 50, 37, 848000),
# orbit='29197',
# satnumber='19')
# cfs.assert_any_call(GF.create_scene.return_value)
def suite():
"""The suite for test_trollduction
"""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(TestDataProcessor))
return mysuite
|
dbcollection/dbcollection | refs/heads/master | tests/functional/load/coco_detection.py | 2 | #!/usr/bin/env python3
"""
Test loading coco.
"""
import os
from dbcollection.utils.test import TestBaseDB
# setup
name = 'coco'
task = 'detection_2015'
data_dir = os.path.join(os.path.expanduser("~"), 'tmp', 'download_data')
verbose = True
# Run tester
tester = TestBaseDB(name, task, data_dir, verbose)
tester.run('load') |
jelugbo/ddi | refs/heads/master | lms/djangoapps/django_comment_client/forum/views.py | 5 | import json
import logging
import xml.sax.saxutils as saxutils
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.contrib.auth.models import User
from django.http import Http404, HttpResponseBadRequest
from django.views.decorators.http import require_GET
import newrelic.agent
from edxmako.shortcuts import render_to_response
from courseware.courses import get_course_with_access
from course_groups.cohorts import is_course_cohorted, get_cohort_id, get_course_cohorts, is_commentable_cohorted
from courseware.access import has_access
from django_comment_client.permissions import cached_has_permission
from django_comment_client.utils import (
merge_dict,
extract,
strip_none,
add_courseware_context,
get_group_id_for_comments_service
)
import django_comment_client.utils as utils
import lms.lib.comment_client as cc
from opaque_keys.edx.locations import SlashSeparatedCourseKey
THREADS_PER_PAGE = 20
INLINE_THREADS_PER_PAGE = 20
PAGES_NEARBY_DELTA = 2
log = logging.getLogger("edx.discussions")
def _attr_safe_json(obj):
"""
return a JSON string for obj which is safe to embed as the value of an attribute in a DOM node
"""
return saxutils.escape(json.dumps(obj), {'"': '&quot;'})
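# Illustrative note (added for clarity): with the double quote mapped to its
# HTML entity, a value such as {"a": 1} is returned as {&quot;a&quot;: 1},
# which can be embedded safely inside a double-quoted DOM attribute.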
@newrelic.agent.function_trace()
def make_course_settings(course):
"""
Generate a JSON-serializable model for course settings, which will be used to initialize a
DiscussionCourseSettings object on the client.
"""
obj = {
'is_cohorted': is_course_cohorted(course.id),
'allow_anonymous': course.allow_anonymous,
'allow_anonymous_to_peers': course.allow_anonymous_to_peers,
'cohorts': [{"id": str(g.id), "name": g.name} for g in get_course_cohorts(course)],
'category_map': utils.get_discussion_category_map(course)
}
return obj
@newrelic.agent.function_trace()
def get_threads(request, course_key, discussion_id=None, per_page=THREADS_PER_PAGE):
"""
This may raise an appropriate subclass of cc.utils.CommentClientError
if something goes wrong, or ValueError if the group_id is invalid.
"""
default_query_params = {
'page': 1,
'per_page': per_page,
'sort_key': 'date',
'sort_order': 'desc',
'text': '',
'commentable_id': discussion_id,
'course_id': course_key.to_deprecated_string(),
'user_id': request.user.id,
'group_id': get_group_id_for_comments_service(request, course_key, discussion_id), # may raise ValueError
}
if not request.GET.get('sort_key'):
# If the user did not select a sort key, use their last used sort key
cc_user = cc.User.from_django_user(request.user)
cc_user.retrieve()
# TODO: After the comment service is updated this can just be user.default_sort_key because the service returns the default value
default_query_params['sort_key'] = cc_user.get('default_sort_key') or default_query_params['sort_key']
else:
# If the user clicked a sort key, update their default sort key
cc_user = cc.User.from_django_user(request.user)
cc_user.default_sort_key = request.GET.get('sort_key')
cc_user.save()
#there are 2 dimensions to consider when executing a search with respect to group id
#is user a moderator
#did the user request a group
query_params = merge_dict(
default_query_params,
strip_none(
extract(
request.GET,
[
'page',
'sort_key',
'sort_order',
'text',
'commentable_ids',
'flagged',
'unread',
'unanswered',
]
)
)
)
threads, page, num_pages, corrected_text = cc.Thread.search(query_params)
for thread in threads:
#patch for backward compatibility to comments service
if not 'pinned' in thread:
thread['pinned'] = False
query_params['page'] = page
query_params['num_pages'] = num_pages
query_params['corrected_text'] = corrected_text
return threads, query_params
@login_required
def inline_discussion(request, course_id, discussion_id):
"""
Renders JSON for DiscussionModules
"""
nr_transaction = newrelic.agent.current_transaction()
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load_forum', course_key)
cc_user = cc.User.from_django_user(request.user)
user_info = cc_user.to_dict()
try:
threads, query_params = get_threads(request, course_key, discussion_id, per_page=INLINE_THREADS_PER_PAGE)
except ValueError:
return HttpResponseBadRequest("Invalid group_id")
with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
is_staff = cached_has_permission(request.user, 'openclose_thread', course.id)
threads = [utils.prepare_content(thread, course_key, is_staff) for thread in threads]
with newrelic.agent.FunctionTrace(nr_transaction, "add_courseware_context"):
add_courseware_context(threads, course)
return utils.JsonResponse({
'discussion_data': threads,
'user_info': user_info,
'annotated_content_info': annotated_content_info,
'page': query_params['page'],
'num_pages': query_params['num_pages'],
'roles': utils.get_role_ids(course_key),
'course_settings': make_course_settings(course)
})
@login_required
def forum_form_discussion(request, course_id):
"""
Renders the main Discussion page, potentially filtered by a search query
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
nr_transaction = newrelic.agent.current_transaction()
course = get_course_with_access(request.user, 'load_forum', course_key, check_if_enrolled=True)
course_settings = make_course_settings(course)
user = cc.User.from_django_user(request.user)
user_info = user.to_dict()
try:
unsafethreads, query_params = get_threads(request, course_key) # This might process a search query
is_staff = cached_has_permission(request.user, 'openclose_thread', course.id)
threads = [utils.prepare_content(thread, course_key, is_staff) for thread in unsafethreads]
except cc.utils.CommentClientMaintenanceError:
log.warning("Forum is in maintenance mode")
return render_to_response('discussion/maintenance.html', {})
except ValueError:
return HttpResponseBadRequest("Invalid group_id")
with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
with newrelic.agent.FunctionTrace(nr_transaction, "add_courseware_context"):
add_courseware_context(threads, course)
if request.is_ajax():
return utils.JsonResponse({
'discussion_data': threads, # TODO: Standardize on 'discussion_data' vs 'threads'
'annotated_content_info': annotated_content_info,
'num_pages': query_params['num_pages'],
'page': query_params['page'],
'corrected_text': query_params['corrected_text'],
})
else:
with newrelic.agent.FunctionTrace(nr_transaction, "get_cohort_info"):
user_cohort_id = get_cohort_id(request.user, course_key)
context = {
'csrf': csrf(request)['csrf_token'],
'course': course,
#'recent_active_threads': recent_active_threads,
'staff_access': has_access(request.user, 'staff', course),
'threads': _attr_safe_json(threads),
'thread_pages': query_params['num_pages'],
'user_info': _attr_safe_json(user_info),
'flag_moderator': cached_has_permission(request.user, 'openclose_thread', course.id) or has_access(request.user, 'staff', course),
'annotated_content_info': _attr_safe_json(annotated_content_info),
'course_id': course.id.to_deprecated_string(),
'roles': _attr_safe_json(utils.get_role_ids(course_key)),
'is_moderator': cached_has_permission(request.user, "see_all_cohorts", course_key),
'cohorts': course_settings["cohorts"], # still needed to render _thread_list_template
'user_cohort': user_cohort_id, # read from container in NewPostView
'is_course_cohorted': is_course_cohorted(course_key), # still needed to render _thread_list_template
'sort_preference': user.default_sort_key,
'category_map': course_settings["category_map"],
'course_settings': _attr_safe_json(course_settings)
}
# print "start rendering.."
return render_to_response('discussion/index.html', context)
@require_GET
@login_required
def single_thread(request, course_id, discussion_id, thread_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
nr_transaction = newrelic.agent.current_transaction()
course = get_course_with_access(request.user, 'load_forum', course_key)
course_settings = make_course_settings(course)
cc_user = cc.User.from_django_user(request.user)
user_info = cc_user.to_dict()
is_moderator = cached_has_permission(request.user, "see_all_cohorts", course_key)
# Currently, the front end always loads responses via AJAX, even for this
# page; it would be a nice optimization to avoid that extra round trip to
# the comments service.
try:
thread = cc.Thread.find(thread_id).retrieve(
recursive=request.is_ajax(),
user_id=request.user.id,
response_skip=request.GET.get("resp_skip"),
response_limit=request.GET.get("resp_limit")
)
except cc.utils.CommentClientRequestError as e:
if e.status_code == 404:
raise Http404
raise
# verify that the thread belongs to the requesting student's cohort
if is_commentable_cohorted(course_key, discussion_id) and not is_moderator:
user_group_id = get_cohort_id(request.user, course_key)
if getattr(thread, "group_id", None) is not None and user_group_id != thread.group_id:
raise Http404
is_staff = cached_has_permission(request.user, 'openclose_thread', course.id)
if request.is_ajax():
with newrelic.agent.FunctionTrace(nr_transaction, "get_annotated_content_infos"):
annotated_content_info = utils.get_annotated_content_infos(course_key, thread, request.user, user_info=user_info)
content = utils.prepare_content(thread.to_dict(), course_key, is_staff)
with newrelic.agent.FunctionTrace(nr_transaction, "add_courseware_context"):
add_courseware_context([content], course)
return utils.JsonResponse({
'content': content,
'annotated_content_info': annotated_content_info,
})
else:
try:
threads, query_params = get_threads(request, course_key)
except ValueError:
return HttpResponseBadRequest("Invalid group_id")
threads.append(thread.to_dict())
with newrelic.agent.FunctionTrace(nr_transaction, "add_courseware_context"):
add_courseware_context(threads, course)
for thread in threads:
#patch for backward compatibility with comments service
if not "pinned" in thread:
thread["pinned"] = False
threads = [utils.prepare_content(thread, course_key, is_staff) for thread in threads]
with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
with newrelic.agent.FunctionTrace(nr_transaction, "get_cohort_info"):
user_cohort = get_cohort_id(request.user, course_key)
context = {
'discussion_id': discussion_id,
'csrf': csrf(request)['csrf_token'],
'init': '', # TODO: What is this?
'user_info': _attr_safe_json(user_info),
'annotated_content_info': _attr_safe_json(annotated_content_info),
'course': course,
#'recent_active_threads': recent_active_threads,
'course_id': course.id.to_deprecated_string(), # TODO: Why pass both course and course.id to template?
'thread_id': thread_id,
'threads': _attr_safe_json(threads),
'roles': _attr_safe_json(utils.get_role_ids(course_key)),
'is_moderator': is_moderator,
'thread_pages': query_params['num_pages'],
'is_course_cohorted': is_course_cohorted(course_key),
'flag_moderator': cached_has_permission(request.user, 'openclose_thread', course.id) or has_access(request.user, 'staff', course),
'cohorts': course_settings["cohorts"],
'user_cohort': user_cohort,
'sort_preference': cc_user.default_sort_key,
'category_map': course_settings["category_map"],
'course_settings': _attr_safe_json(course_settings)
}
return render_to_response('discussion/index.html', context)
@require_GET
@login_required
def user_profile(request, course_id, user_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
nr_transaction = newrelic.agent.current_transaction()
#TODO: Allow sorting?
course = get_course_with_access(request.user, 'load_forum', course_key)
try:
query_params = {
'page': request.GET.get('page', 1),
'per_page': THREADS_PER_PAGE, # more than threads_per_page to show more activities
}
try:
group_id = get_group_id_for_comments_service(request, course_key)
except ValueError:
return HttpResponseBadRequest("Invalid group_id")
if group_id is not None:
query_params['group_id'] = group_id
profiled_user = cc.User(id=user_id, course_id=course_key, group_id=group_id)
else:
profiled_user = cc.User(id=user_id, course_id=course_key)
threads, page, num_pages = profiled_user.active_threads(query_params)
query_params['page'] = page
query_params['num_pages'] = num_pages
user_info = cc.User.from_django_user(request.user).to_dict()
with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
is_staff = cached_has_permission(request.user, 'openclose_thread', course.id)
threads = [utils.prepare_content(thread, course_key, is_staff) for thread in threads]
if request.is_ajax():
return utils.JsonResponse({
'discussion_data': threads,
'page': query_params['page'],
'num_pages': query_params['num_pages'],
'annotated_content_info': _attr_safe_json(annotated_content_info),
})
else:
context = {
'course': course,
'user': request.user,
'django_user': User.objects.get(id=user_id),
'profiled_user': profiled_user.to_dict(),
'threads': _attr_safe_json(threads),
'user_info': _attr_safe_json(user_info),
'annotated_content_info': _attr_safe_json(annotated_content_info),
'page': query_params['page'],
'num_pages': query_params['num_pages'],
}
return render_to_response('discussion/user_profile.html', context)
except User.DoesNotExist:
raise Http404
@login_required
def followed_threads(request, course_id, user_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
nr_transaction = newrelic.agent.current_transaction()
course = get_course_with_access(request.user, 'load_forum', course_key)
try:
profiled_user = cc.User(id=user_id, course_id=course_key)
default_query_params = {
'page': 1,
'per_page': THREADS_PER_PAGE, # more than threads_per_page to show more activities
'sort_key': 'date',
'sort_order': 'desc',
}
query_params = merge_dict(
default_query_params,
strip_none(
extract(
request.GET,
[
'page',
'sort_key',
'sort_order',
'flagged',
'unread',
'unanswered',
]
)
)
)
try:
group_id = get_group_id_for_comments_service(request, course_key)
except ValueError:
return HttpResponseBadRequest("Invalid group_id")
if group_id is not None:
query_params['group_id'] = group_id
threads, page, num_pages = profiled_user.subscribed_threads(query_params)
query_params['page'] = page
query_params['num_pages'] = num_pages
user_info = cc.User.from_django_user(request.user).to_dict()
with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_key, threads, request.user, user_info)
if request.is_ajax():
is_staff = cached_has_permission(request.user, 'openclose_thread', course.id)
return utils.JsonResponse({
'annotated_content_info': annotated_content_info,
'discussion_data': [utils.prepare_content(thread, course_key, is_staff) for thread in threads],
'page': query_params['page'],
'num_pages': query_params['num_pages'],
})
#TODO remove non-AJAX support, it does not appear to be used and does not appear to work.
else:
context = {
'course': course,
'user': request.user,
'django_user': User.objects.get(id=user_id),
'profiled_user': profiled_user.to_dict(),
'threads': _attr_safe_json(threads),
'user_info': _attr_safe_json(user_info),
'annotated_content_info': _attr_safe_json(annotated_content_info),
# 'content': content,
}
return render_to_response('discussion/user_profile.html', context)
except User.DoesNotExist:
raise Http404
|
bunnyitvn/webptn | refs/heads/master | build/lib.linux-i686-2.7/django/contrib/formtools/tests/wizard/wizardtests/forms.py | 313 | import os
import tempfile
from django import forms
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import WizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(WizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'all_cleaned_data': self.get_all_cleaned_data(),
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
def get_context_data(self, form, **kwargs):
context = super(ContactWizard, self).get_context_data(form, **kwargs)
if self.storage.current_step == 'form2':
context.update({'another_var': True})
return context
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
UserFormSet = modelformset_factory(User, form=UserForm)
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
|
aspiers/automation | refs/heads/master | scripts/jenkins/cloud/gerrit/gerrit_merge.py | 3 | #!/usr/bin/env python
import argparse
import os
import sys
sys.path.append(os.path.dirname(__file__))
from gerrit import GerritChange # noqa: E402
from gerrit_settings import gerrit_project_map # noqa: E402
def check_all_dependencies_satisfied(change):
change_deps = change.get_dependencies()
unmerged_deps = [change_dep
for change_dep in change_deps
if change_dep.status != "MERGED"]
if unmerged_deps:
print("Unmerged dependencies:\n{}".format('\n'.join([
str(c) for c in unmerged_deps])))
return False
return True
def gerrit_merge(change, dry_run=False):
"""
Attempt to merge a Gerrit change.
:param change: the GerritChange to merge
:param dry_run: if True, only check and report whether the change could be merged
:return: 0 if the change was merged (or is mergeable in dry-run mode), 1 otherwise
"""
project_map = gerrit_project_map(change.branch)
print('Attempting to merge change {}'.format(change))
if not change.is_current and not dry_run:
print("Skipping - change is not current: {}".format(change))
return 1
if change.gerrit_project not in project_map:
print("Skipping - project {} not in the list of "
"allowed projects ".format(change.gerrit_project))
return 1
if change.status != 'NEW':
print("Skipping - change is {}: {}".format(
change.status.lower(), change))
return 1
if not change.mergeable:
print("Change cannot be merged due to conflicts: {}".format(change))
return 1
if not change.submittable:
print("Change doesn't meet submit requirements: {}".format(change))
return 1
if not check_all_dependencies_satisfied(change):
msg = "Unable to merge: Commit dependencies are not satisifed."
print(msg)
if not dry_run:
change.review(message=msg)
return 1
if not dry_run:
change.merge()
print("Change merged: {}".format(change))
else:
print("[DRY-RUN] Change can be merged: {}".format(change))
return 0
def main():
parser = argparse.ArgumentParser(
description='Merge a Gerrit change if its dependencies have merged '
'and if it submittable')
parser.add_argument('change', type=int,
help='the Gerrit change number (e.g. 1234)')
parser.add_argument('--patch', type=int,
default=None,
help='the Gerrit patch number (e.g. 3). If not '
'supplied, the latest patch will be used')
parser.add_argument('--dry-run', default=False, action='store_true',
help='do a dry run')
args = parser.parse_args()
change = GerritChange(str(args.change), patchset=args.patch)
gerrit_merge(change, args.dry_run)
if __name__ == '__main__':
main()
|
cokelaer/spectrum | refs/heads/master | test/test_spetrogram.py | 1 |
from spectrum import Spectrogram, dolphin_filename, readwav
def test_spectrogram():
data, samplerate = readwav(dolphin_filename)
p = Spectrogram(data, ws=128, W=4096, sampling=samplerate)
p.periodogram()
p.plot()
|
d9pouces/swampdragon | refs/heads/master | tests/test_model_serializer_deserialize.py | 10 | from swampdragon.serializers.model_serializer import ModelSerializer
from swampdragon.testing.dragon_testcase import DragonTestCase
from .models import TextModel, SDModel
from datetime import datetime
from django.db import models
class DateModel(SDModel):
date = models.DateTimeField()
class DateModelSerializer(ModelSerializer):
class Meta:
model = DateModel
publish_fields = ('date')
update_fields = ('date')
class TextModelSerializer(ModelSerializer):
class Meta:
model = TextModel
publish_fields = ('text')
update_fields = ('text')
class TestModelSerializer(DragonTestCase):
def test_deserialize_model(self):
data = {'text': 'foo'}
serializer = TextModelSerializer(data)
model_instance = serializer.save()
self.assertEqual(model_instance.text, data['text'])
def test_passing_invalid_data(self):
foo = 'text'
with self.assertRaises(Exception):
TextModelSerializer(foo)
def test_ignore_non_model_fields(self):
data = {'text': 'foo', 'random_field': 'val'}
serializer = TextModelSerializer(data)
model_instance = serializer.deserialize()
self.assertEqual(model_instance.text, data['text'])
def test_deserialize_field(self):
date = datetime.now()
data = {'date': str(date)}
serializer = DateModelSerializer(data)
object = serializer.save()
self.assertEqual(object.date, date)
|
nathanaevitas/odoo | refs/heads/master | openerp/addons/lunch/__openerp__.py | 267 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Lunch Orders',
'author': 'OpenERP SA',
'version': '0.2',
'depends': ['base', 'report'],
'category' : 'Tools',
'summary': 'Lunch Order, Meal, Food',
'description': """
The base module to manage lunch.
================================
Many companies order sandwiches, pizzas and other, from usual suppliers, for their employees to offer them more facilities.
However lunches management within the company requires proper administration especially when the number of employees or suppliers is important.
The “Lunch Order” module has been developed to make this management easier but also to offer employees more tools and usability.
In addition to a full meal and supplier management, this module offers the possibility to display warning and provides quick order selection based on employee’s preferences.
If you want to save your employees' time and avoid them to always have coins in their pockets, this module is essential.
""",
'data': [
'security/lunch_security.xml',
'lunch_view.xml',
'wizard/lunch_order_view.xml',
'wizard/lunch_validation_view.xml',
'wizard/lunch_cancel_view.xml',
'lunch_report.xml',
'report/report_lunch_order_view.xml',
'security/ir.model.access.csv',
'views/report_lunchorder.xml',
'views/lunch.xml',
],
'demo': ['lunch_demo.xml',],
'installable': True,
'website': 'https://www.odoo.com/page/employees',
'application' : True,
'certificate' : '001292377792581874189',
}
|
jguyomard/phantomjs | refs/heads/master | src/breakpad/src/tools/gyp/pylib/gyp/generator/msvs.py | 137 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import subprocess
import sys
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
import gyp.common
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
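# Illustrative note (added for clarity): a made-up GUID such as
# 'E3A1F2B4-0000-4AC4-B1D2-90F345678901' passes this check, while the same
# string with lowercase hex letters does not, which is exactly the case that
# trips up BuildConsole as described above.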
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
return path.replace('/', '\\')
def _SourceInFolders(sources, prefix=None, excluded=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_SourceInFolders([['a', 'bob1.c'], ['b', 'bob2.c']], prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = '\\'.join(prefix + s)
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = _SourceInFolders(folders[f], prefix=prefix + [f],
excluded=excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
def _PrepareActionRaw(spec, cmd, cygwin_shell, has_input_path, quote_cmd):
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = (
'"$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += (
'bash -c "%(cmd)s"')
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return cmd
else:
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
direct_cmd = [cmd[0]] + ['"%s"' % _FixPath(i) for i in cmd[1:]]
else:
direct_cmd = [cmd[0]] + [_FixPath(i) for i in cmd[1:]]
# Collapse into a single command.
return ' '.join(direct_cmd)
def _PrepareAction(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _PrepareActionRaw(spec, rule['action'], mcs,
has_input_path, quote_cmd)
def _PickPrimaryInput(inputs):
# Pick second input as the primary one, unless there's only one.
# TODO(bradnelson): this is a bit of a hack,
# find something more general.
if len(inputs) > 1:
return inputs[1]
else:
return inputs[0]
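# Illustrative note (added for clarity): for hypothetical inputs
# ['rule_script.py', 'foo.idl'] the second entry 'foo.idl' is returned as the
# primary input; a single-element list returns its only entry.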
def _SetRunAs(user_file, config_name, c_data, command,
environment={}, working_directory=""):
"""Add a run_as rule to the user file.
Arguments:
user_file: The MSVSUserFile to add the command to.
config_name: The name of the configuration to add it to
c_data: The dict of the configuration to add it to
command: The path to the command to execute.
environment: Dictionary of environment variables to set for the command. (optional)
working_directory: Directory to run the command in. (optional)
"""
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
command, environment, working_directory)
def _AddCustomBuildTool(p, spec, inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = [_FixPath(i) for i in inputs]
outputs = [_FixPath(i) for i in outputs]
tool = MSVSProject.Tool(
'VCCustomBuildTool', {
'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
primary_input = _PickPrimaryInput(inputs)
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(primary_input,
_ConfigFullName(config_name, c_data), tools=[tool])
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
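# Illustrative note (added for clarity), using the hypothetical input file
# 'foo/bar.idl':
#   _RuleExpandPath('$(InputName).h', 'foo/bar.idl')   -> 'bar.h'
#   _RuleExpandPath('$(InputFileName)', 'foo/bar.idl') -> 'bar.idl'
#   _RuleExpandPath('$(InputPath)', 'foo/bar.idl')     -> 'foo/bar.idl'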
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question
    trigger_file: the source file which triggered this rule
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = rule.get('inputs', [])
raw_outputs = rule.get('outputs', [])
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRules(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename))
rules_file.Create(spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = [_FixPath(i) for i in r.get('inputs', [])]
outputs = [_FixPath(i) for i in r.get('outputs', [])]
cmd = _PrepareAction(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.Write()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
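  """Rewrite MSVS macros in |path| to their cygwin-style equivalents
  ($(OutDir) -> $(OutDirCygwin), $(IntDir) -> $(IntDirCygwin))."""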
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(p, rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
p: the target project
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
    options: global generator options
    actions_to_add: deferred list of actions to add in
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
file.write('\tmkdir -p %s\n' % od)
file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
file.write('\t%s\n\n' % cmd)
# Close up the file.
file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _PrepareActionRaw(spec, cmd, True, False, True)
# TODO(bradnelson): this won't be needed if we have a better way to pick
# the primary input.
all_inputs = list(all_inputs)
all_inputs.insert(1, filename)
actions_to_add.append({
'inputs': [_FixPath(i) for i in all_inputs],
'outputs': [_FixPath(i) for i in all_outputs],
'description': 'Running %s' % cmd,
'cmd': cmd,
})
def _GenerateRules(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
    p: the project object to add rules to
    output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRules(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(p, rules_external, output_dir, spec,
sources, options, actions_to_add)
# Add outputs generated by each rule (if applicable).
for rule in rules:
    # Skip this rule unless its outputs are processed as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs.remove(tf)
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _GenerateProject(vcproj_filename, build_file, spec, options, version):
"""Generates a vcproj file.
Arguments:
vcproj_filename: Filename of the vcproj file to generate.
build_file: Filename of the .gyp file that the vcproj file comes from.
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    version: The MSVSVersion object describing the project file format version.
"""
# Pluck out the default configuration.
default_config = spec['configurations'][spec['default_configuration']]
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
    if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return guid
#print 'Generating %s' % vcproj_filename
vcproj_dir = os.path.dirname(vcproj_filename)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
p = MSVSProject.Writer(vcproj_filename, version=version)
p.Create(spec['target_name'], guid=guid, platforms=platforms)
# Create the user file.
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([vcproj_filename, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version=version)
user_file.Create(spec['target_name'])
# Get directory project file is in.
gyp_dir = os.path.split(vcproj_filename)[0]
# Pick target configuration type.
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
'dummy_executable': '1', # .exe
}[spec['type']]
except KeyError, e:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
for config_name, c in spec['configurations'].iteritems():
# Process each configuration.
vsprops_dirs = c.get('msvs_props', [])
vsprops_dirs = [_FixPath(i) for i in vsprops_dirs]
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in msvs_settings.
for tool in c.get('msvs_settings', {}):
settings = c['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add in includes.
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
c.get('include_dirs', []) +
c.get('msvs_system_include_dirs', []))
resource_include_dirs = c.get('resource_include_dirs', include_dirs)
include_dirs = [_FixPath(i) for i in include_dirs]
resource_include_dirs = [_FixPath(i) for i in resource_include_dirs]
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
libraries = [re.sub('^(\-l)', '', lib) for lib in libraries]
# Add them.
_ToolAppend(tools, 'VCLinkerTool',
'AdditionalDependencies', libraries)
# Select a name for the output file.
output_file_map = {
'executable': ('VCLinkerTool', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'static_library': ('VCLibrarianTool', '$(OutDir)\\lib\\', '.lib'),
'dummy_executable': ('VCLinkerTool', '$(IntDir)\\', '.junk'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, out_dir, suffix = output_file_props
out_dir = spec.get('msvs_product_directory', out_dir)
out_file = os.path.join(out_dir,
spec.get('product_name',
'$(ProjectName)') + suffix)
_ToolAppend(tools, vc_tool, 'OutputFile', out_file,
only_if_unset=True)
# Add defines.
defines = []
for d in c.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart).replace('"', '\\"') for dpart in d])
else:
fd = str(d).replace('"', '\\"')
defines.append(fd)
_ToolAppend(tools, 'VCCLCompilerTool',
'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool',
'PreprocessorDefinitions', defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb')
# Add disabled warnings.
disabled_warnings = [str(i) for i in c.get('msvs_disabled_warnings', [])]
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
prebuild = c.get('msvs_prebuild')
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
postbuild = c.get('msvs_postbuild')
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
header = c.get('msvs_precompiled_header')
if header:
header = os.path.split(header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile',
_FixPath(def_files[0]))
elif def_files:
raise ValueError('Multiple module definition files in one target, '
'target %s lists multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
# Convert tools to expected form.
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if tool == 'VCLinkerTool' and setting == 'AdditionalDependencies':
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = c.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
if not prepared_attrs.has_key('OutputDirectory'):
prepared_attrs['OutputDirectory'] = '$(SolutionDir)$(ConfigurationName)'
if not prepared_attrs.has_key('IntermediateDirectory'):
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
# Add in this configuration.
p.AddConfig(_ConfigFullName(config_name, c),
attrs=prepared_attrs, tools=tool_list)
# Prepare list of sources and excluded sources.
sources = set(spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
gyp_file = os.path.split(build_file)[1]
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs')
if not inputs:
# This is an action with no inputs. Make the primary input
      # be the .gyp file itself so Visual Studio has a place to
# hang the custom build rule.
inputs = [gyp_file]
a['inputs'] = inputs
primary_input = _PickPrimaryInput(inputs)
inputs = set(inputs)
sources.update(inputs)
inputs.remove(primary_input)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
outputs = set(a.get('outputs', []))
sources.update(outputs)
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
files = set(cpy.get('files', []))
sources.update(files)
# Add rules.
actions_to_add = []
_GenerateRules(p, gyp_dir, options, spec,
sources, excluded_sources,
actions_to_add)
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = [_FixPath(i) for i in sources]
# Convert to proper windows form.
excluded_sources = [_FixPath(i) for i in excluded_sources]
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
# Gather a list of precompiled header related sources.
precompiled_related = []
for config_name, c in spec['configurations'].iteritems():
for k in precomp_keys:
f = c.get(k)
if f:
precompiled_related.append(_FixPath(f))
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _SourceInFolders(sources, excluded=fully_excluded)
# Add in dummy file for type none.
if spec['type'] == 'dummy_executable':
# Pull in a dummy main so it can link successfully.
dummy_relpath = gyp.common.RelativePath(
options.depth + '\\tools\\gyp\\gyp_dummy.c', gyp_dir)
sources.append(dummy_relpath)
# Add in files.
p.AddFiles(sources)
# Add deferred actions to add.
for a in actions_to_add:
_AddCustomBuildTool(p, spec,
inputs=a['inputs'],
outputs=a['outputs'],
description=a['description'],
cmd=a['cmd'])
# Exclude excluded sources from being built.
for f in excluded_sources:
for config_name, c in spec['configurations'].iteritems():
precomped = [_FixPath(c.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
p.AddFileConfig(f, _ConfigFullName(config_name, c),
{'ExcludedFromBuild': 'true'})
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for config_name, c in spec['configurations'].iteritems():
for f in excluded_idl:
p.AddFileConfig(f, _ConfigFullName(config_name, c),
{'ExcludedFromBuild': 'true'})
# Add in tool files (rules).
tool_files = set()
for config_name, c in spec['configurations'].iteritems():
for f in c.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
# Handle pre-compiled headers source stubs specially.
for config_name, c in spec['configurations'].iteritems():
source = c.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
      # UsePrecompiledHeader=1 if using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, c),
{}, tools=[tool])
# Add actions.
actions = spec.get('actions', [])
for a in actions:
cmd = _PrepareAction(spec, a, has_input_path=False)
_AddCustomBuildTool(p, spec,
inputs=a.get('inputs', []),
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
cmd=cmd)
# Add run_as and test targets.
has_run_as = False
if spec.get('run_as') or int(spec.get('test', 0)):
has_run_as = True
run_as = spec.get('run_as', {
'action' : ['$(TargetPath)', '--gtest_print_time'],
})
working_directory = run_as.get('working_directory', '.')
action = run_as.get('action', [])
environment = run_as.get('environment', [])
for config_name, c_data in spec['configurations'].iteritems():
_SetRunAs(user_file, config_name, c_data,
action, environment, working_directory)
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildTool() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
_AddCustomBuildTool(p, spec,
inputs=[src], outputs=[dst],
description='Copying %s to %s' % (src, dst),
cmd=cmd)
# Write it out.
p.Write()
# Write out the user file, but only if we need to.
if has_run_as:
user_file.Write()
# Return the guid so we can refer to it elsewhere.
return p.guid
def _GetPathDict(root, path):
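  """Return the nested dict corresponding to |path| under |root|, creating
  any missing intermediate folder dicts along the way."""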
if path == '':
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node.keys():
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(project_objs, flat):
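  """Arrange the vcproj objects into a hierarchy of solution folders that
  mirrors the directory layout of their .gyp files."""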
root = {}
# Convert into a tree of dicts on path.
for p in project_objs.keys():
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objs[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _ProjectObject(sln, qualified_target, project_objs, projects):
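  """Get or create the MSVSNew.MSVSProject object for |qualified_target|,
  caching it in |project_objs| along with objects for its dependencies."""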
# Done if this project has an object.
if project_objs.get(qualified_target):
return project_objs[qualified_target]
# Get dependencies for this project.
spec = projects[qualified_target]['spec']
deps = spec.get('dependencies', [])
# Get objects for each dependency.
deps = [_ProjectObject(sln, d, project_objs, projects) for d in deps]
# Find relative path to vcproj from sln.
vcproj_rel_path = gyp.common.RelativePath(
projects[qualified_target]['vcproj_path'], os.path.split(sln)[0])
vcproj_rel_path = _FixPath(vcproj_rel_path)
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
# Create object for this project.
obj = MSVSNew.MSVSProject(
vcproj_rel_path,
name=spec['target_name'],
guid=projects[qualified_target]['guid'],
dependencies=deps,
config_platform_overrides=config_platform_overrides)
# Store it to the list of objects.
project_objs[qualified_target] = obj
# Return project object.
return obj
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = \
MSVSVersion.SelectVisualStudioVersion(generator_flags.get('msvs_version',
'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
generator_flags = params.get('generator_flags', {})
# Get the project file format version back out of where we stashed it in
  # CalculateVariables.
msvs_version = params['msvs_version']
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
build_file = gyp.common.BuildFile(qualified_target)
spec = target_dicts[qualified_target]
for config_name, c in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, c))
configs = list(configs)
# Generate each project.
projects = {}
for qualified_target in target_list:
build_file = gyp.common.BuildFile(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
default_config = spec['configurations'][spec['default_configuration']]
vcproj_filename = default_config.get('msvs_existing_vcproj')
if not vcproj_filename:
vcproj_filename = spec['target_name'] + options.suffix + '.vcproj'
vcproj_path = os.path.join(os.path.split(build_file)[0], vcproj_filename)
if options.generator_output:
projectDirPath = os.path.dirname(os.path.abspath(vcproj_path))
vcproj_path = os.path.join(options.generator_output, vcproj_path)
fixpath_prefix = gyp.common.RelativePath(projectDirPath,
os.path.dirname(vcproj_path))
projects[qualified_target] = {
'vcproj_path': vcproj_path,
'guid': _GenerateProject(vcproj_path, build_file,
spec, options, version=msvs_version),
'spec': spec,
}
fixpath_prefix = None
for build_file in data.keys():
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
#print 'Generating %s' % sln_path
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Convert projects to Project Objects.
project_objs = {}
for p in sln_projects:
_ProjectObject(sln_path, p, project_objs, projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
project_objs, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
|
thumt/THUMT | refs/heads/master | thumt/utils/checkpoint.py | 1 | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import torch
def oldest_checkpoint(path):
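    """Return the *.pt checkpoint in `path` with the smallest numeric
    counter in its filename, or None if no checkpoints exist."""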
names = glob.glob(os.path.join(path, "*.pt"))
if not names:
return None
oldest_counter = 10000000
checkpoint_name = names[0]
for name in names:
counter = name.rstrip(".pt").split("-")[-1]
if not counter.isdigit():
continue
else:
counter = int(counter)
if counter < oldest_counter:
checkpoint_name = name
oldest_counter = counter
return checkpoint_name
def latest_checkpoint(path):
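    """Return the *.pt checkpoint in `path` with the largest numeric
    counter in its filename, or None if no checkpoints exist."""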
names = glob.glob(os.path.join(path, "*.pt"))
if not names:
return None
latest_counter = 0
checkpoint_name = names[0]
for name in names:
counter = name.rstrip(".pt").split("-")[-1]
if not counter.isdigit():
continue
else:
counter = int(counter)
if counter > latest_counter:
checkpoint_name = name
latest_counter = counter
return checkpoint_name
def save(state, path, max_to_keep=None):
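    """Write `state` to path/model-<counter>.pt, deleting the oldest
    existing checkpoint first when `max_to_keep` would be exceeded."""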
checkpoints = glob.glob(os.path.join(path, "*.pt"))
if not checkpoints:
counter = 1
else:
checkpoint = latest_checkpoint(path)
counter = int(checkpoint.rstrip(".pt").split("-")[-1]) + 1
if max_to_keep and len(checkpoints) >= max_to_keep:
checkpoint = oldest_checkpoint(path)
os.remove(checkpoint)
checkpoint = os.path.join(path, "model-%d.pt" % counter)
print("Saving checkpoint: %s" % checkpoint)
torch.save(state, checkpoint)
|
BiRG/Omics-Dashboard | refs/heads/master | omics/omics_dashboard/dashboards/nmr_metabolomics/opls/layouts.py | 1 | import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from flask_login import current_user
from data_tools.wrappers.analyses import get_analyses
from data_tools.wrappers.collections import get_collections
from .model import OPLSModel
def get_load_results_form():
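    """Build the Load/Export Results tab: load a saved results collection,
    download results and plots, and post a transformed collection."""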
try:
analysis_options = [
{'label': f'{analysis.id}: {analysis.name}', 'value': analysis.id}
for analysis in get_analyses(current_user)
]
except:
analysis_options = []
try:
collection_options = [
{'label': f'{collection.id}: {collection.name}', 'value': collection.id}
for collection in get_collections(current_user, {'kind': 'results'})
if collection.get_attr('analysis_type', safe=True) == 'opls'
]
except:
collection_options = []
try:
opls_data = OPLSModel(load_data=True)
loaded_badges = opls_data.get_results_collection_badges()
except:
loaded_badges = [html.Span([dbc.Badge('None', className='badge-pill')])]
return dbc.Form(
[
html.H5('Load Results'),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Results Collection ID', html_for='results-collection-id'),
dcc.Dropdown(options=collection_options, id='results-collection-id', multi=False)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Results collection'),
dcc.Loading(
[
dbc.InputGroup(
[
dbc.Button('Get', id='get-results-collection',
className='col-sm-2 btn-success'),
html.H4(html.Div(loaded_badges, id='loaded-results-collection'),
id='loaded-results-collection-wrapper',
className='col-sm-10')
], id='loaded-results-display'
)
]
)
]
)
]
)
]
),
html.H5('Save Results'),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Results', html_for='results-select'),
dcc.Dropdown(id='results-select',
multi=True,
options=[
{'label': 'Metrics', 'value': 'metrics'},
{'label': 'Loadings + p-Values', 'value': 'loadings'},
{'label': 'Scores', 'value': 'scores'},
{'label': 'Weights', 'value': 'weights'}
])
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('File Format', html_for='file-format-select'),
dcc.Dropdown(id='file-format-select',
multi=False,
options=[
{'label': 'CSV', 'value': 'csv'}
])
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Assemble results', html_for='download-button-group'),
dbc.FormGroup(
[
dbc.Button([html.I(className='fas fa-cogs')],
id='download-button',
className='btn btn-info')
], id='download-button-group'
)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Download', html_for='download-link-group'),
dbc.FormGroup(
[
html.A([html.I(className='fas fa-download'), ' Download'],
id='download-link', className='btn btn-secondary disabled')
], id='download-link-group'
)
]
)
]
)
], className='form-row'
),
html.Div(html.Small('', id='download-message', className='form-text')),
# dcc.Loading(html.Small('', id='download-message', className='form-text')),
# will inject link when results posted
html.H5('Save Plots'),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Width', html_for='width-input'),
dbc.Input(type='number', min=0, step=0.25, value=6.5, id='width-input')
]
)
], className='col-2'
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Height', html_for='height-input'),
dbc.Input(type='number', min=0, step=0.25, value=4, id='height-input')
]
)
], className='col-2'
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Units', html_for='units-select'),
dcc.Dropdown(options=[
{'label': 'in', 'value': 'in'},
{'label': 'cm', 'value': 'cm'},
{'label': 'px', 'value': 'px'}
], value='in', clearable=False, id='units-select')
]
)
], className='col-1'
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('DPI', html_for='dpi-select'),
dbc.Input(type='number', min=50, step=25, value=100, id='dpi-input')
]
)
], className='col-2'
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('File Formats', html_for='plot-file-format-select'),
dcc.Dropdown(options=[
{'label': 'SVG', 'value': 'svg'},
{'label': 'PNG', 'value': 'png'},
{'label': 'JPEG', 'value': 'jpg'},
{'label': 'PDF', 'value': 'pdf'},
{'label': 'TIFF', 'value': 'tif'},
{'label': 'EPS', 'value': 'eps'}
], value=['png'], clearable=False, multi=True, id='plot-file-format-select')
]
)
], className='col-2'
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Prepare', html_for='plot-download-button-group'),
dbc.FormGroup(
[
dbc.Button([html.I(className='fas fa-cogs'), ' Prepare'],
id='plot-download-button',
className='btn btn-info')
], id='plot-download-button-group'
)
]
)
], className='col-1'
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Download', html_for='plot-download-link-group'),
dbc.FormGroup(
[
#dcc.Loading(
html.Div(
html.A([html.I(className='fas fa-download'), ' Download'],
id='plot-download-link',
className='btn btn-secondary disabled')
)
], id='plot-download-link-group'
)
]
)
], className='col-1'
)
]
),
dbc.Row(
[
dcc.Interval(id='progress-interval', n_intervals=0, interval=3600000),
html.Div(dbc.FormText('Image export progress'), id='progress-label'),
dbc.Progress(html.Div(dbc.Badge('0/0', color='light', pill=True, id='progress-badge')),
id='progress', striped=True, animated=True, style={'height': '25px'},
color='info', className='w-100')
],
id='progress-div'
),
# will inject link when results posted
# dcc.Loading(html.Small('', id='plot-download-message', className='form-text')),
html.Div(html.Small('', id='plot-download-message', className='form-text')),
html.H5('Post transformed collection'),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Name', html_for='name-input'),
dbc.Input(id='name-input')
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Analysis', html_for='analysis-select'),
dcc.Dropdown(id='analysis-select', options=analysis_options)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Post', html_for='post-button-group'),
dbc.FormGroup(
[
dbc.Button([html.I(className='fas fa-upload'), ' Post'],
id='post-button',
className='btn btn-success')
], id='post-button-group'
)
]
)
]
)
], className='form-row'
),
dcc.Loading(html.Small('', id='post-message', className='form-text'))
# will inject link when results posted
]
)
def get_opls_options_form():
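    """Build the OPLS Options tab: data collection selection, regression and
    cross-validation settings, and record filtering/pairing options."""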
try:
collection_options = [
{'label': f'{collection.id}: {collection.name}', 'value': collection.id}
for collection in get_collections(current_user, {'kind': 'data'})
]
except:
collection_options = []
try:
opls_data = OPLSModel(load_data=True)
label_options = [{'label': label, 'value': label} for label in opls_data.labels]
label_options_with_type = [{'label': label, 'value': label} for label in opls_data.get_label_data(True)]
loaded_badges = opls_data.get_collection_badges()
collection_load_info = opls_data.get_collection_load_info()
except:
loaded_badges = [html.Span([dbc.Badge('None', className='badge-pill')])]
collection_load_info = 'Loaded collections.'
label_options = []
label_options_with_type = []
return dbc.Form(
[
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label('Collection ID', html_for='collection-id'),
dcc.Dropdown(options=collection_options, id='collection-id', multi=False)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(collection_load_info, html_for='loaded-display',
id='collections-label'),
dcc.Loading(
[
dbc.InputGroup(
[
dbc.Button('Get', id='get-collection',
className='col-sm-2 btn-success'),
html.H4(loaded_badges, id='loaded-collections',
className='col-sm-10')
], id='loaded-display'
)
]
)
]
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Target variable.', html.Abbr('\uFE56',
title='The target ("y") variable.')],
html_for='target-variable'),
dcc.Dropdown(id='target-variable', options=label_options_with_type, multi=False)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Regression type.', html.Abbr('\uFE56',
title='For categorical target variables,'
                                                                               ' choose Discrimination. When the'
                                                                               ' magnitudes of values are important,'
' choose Regression.')],
html_for='regression-type'),
dcc.Dropdown(id='regression-type',
options=[
{
'label': 'Regression',
'value': 'regression'
},
{
'label': 'Discrimination',
'value': 'discrimination'
}
],
multi=False)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Multiclass discrimination behavior',
html.Abbr('\uFE56',
title='How to handle multiclass targets. One regressor is'
' created for each class or pair of classes and the '
'cross-validation is performed on each one separately. '
'"One v. One" will try to discriminate between each pair'
' of classes. "One v. All" will discriminate each class '
'from all the other classes. You can select both.\n\n'
'If "Regression" is selected in the previous option, an '
'attempt will be made to coerce the values of the target '
'variable into floating-point numbers and treat the target '
'as a continuous variable and this option will be ignored.')
], html_for='multiclass-behavior'),
dcc.Dropdown(id='multiclass-behavior',
options=[
{
'label': 'One v. one',
'value': 'one_v_one',
},
{
'label': 'One v. all',
'value': 'one_v_all'
}
],
multi=True)
]
)
]
),
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Cross-validation folds',
html.Abbr('\uFE56',
title='The number of test/train splits for the test to determine '
'the significance of regression quality metrics.')
], html_for='cross-val-k'),
dbc.Input(id='cross-val-k', type='number', value=-1, min=-1)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Minimum orthogonal components.',
html.Abbr('\uFE56',
title='The minimum number of orthogonal components to remove.')
], html_for='min-n-components'),
dbc.Input(id='min-n-components', type='number', value=1, min=1)
]
)
]
),
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Inner test \u03B1',
html.Abbr('\uFE56',
title='A two-sided p-value threshold which is used to determine '
'which features need further scrutiny.')
], html_for='inner-test-alpha'),
dbc.Input(id='inner-test-alpha', type='number', value=0.2, step=0.05)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Outer test \u03B1',
html.Abbr('\uFE56',
title='A two-sided p-value threshold which is used to determine '
'which features are significant.')
], html_for='outer-test-alpha'),
dbc.Input(id='outer-test-alpha', type='number', value=0.01, step=0.01)
]
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Metric test permutations',
html.Abbr('\uFE56',
title='How many permutations (of the target) to determine '
'p-values for regression quality metrics.')
], html_for='permutations'),
dbc.Input(id='permutations', type='number', value=1000)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Inner test permutations',
html.Abbr('\uFE56',
title='How many permutations (of the data in one feature) to '
'perform for every feature to estimate its significance.')
], html_for='inner-permutations'),
dbc.Input(id='inner-permutations', type='number', value=100)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(
[
'Outer test permutations',
html.Abbr('\uFE56',
title='How many permutations (of the data in one feature) to '
'perform for every feature determined to be potentially '
'significant in the first round.')
], html_for='outer-permutations'),
dbc.Input(id='outer-permutations', type='number', value=500)
]
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Scale by label(s)',
html.Abbr('\uFE56',
title='The mean of the records satisfying conditions on these '
'fields will be subtracted from each record, then each'
' record will be scaled by the standard deviation of the'
' records satisfying the conditions.')],
html_for='scale-by'),
dcc.Dropdown(id='scale-by', options=label_options, multi=True),
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Scale by conditions',
html.Abbr('\uFE56',
title='The conditions for the records to use for scaling. If '
'left blank, then no scaling is performed.')],
html_for='scale-by-value'),
dcc.Dropdown(id='scale-by-value', options=[{'label': 'All Records',
'value': 'index'}], multi=True),
]
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Model by label(s)',
html.Abbr('\uFE56',
title='Only consider records satisfying conditions on these'
' fields.')],
html_for='model-by'),
dcc.Dropdown(id='model-by', options=label_options, multi=True)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Model by conditions',
html.Abbr('\uFE56',
title='The conditions which must be satisfied for the records'
                                                               ' to be considered.')],
html_for='model-by-value'),
dcc.Dropdown(id='model-by-value', options=[], multi=True)
]
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Ignore by label(s)',
html.Abbr('\uFE56',
title='Exclude records satisfying conditions on these fields')],
html_for='ignore-by'),
dcc.Dropdown(id='ignore-by', options=label_options, multi=True)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Ignore by conditions',
html.Abbr('\uFE56',
title='Conditions which apply to records to be excluded.')],
html_for='ignore-by-value'),
dcc.Dropdown(id='ignore-by-value', options=[], multi=True)
]
)
]
)
]
),
dbc.Row(
[
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Pair on label(s)',
html.Abbr('\uFE56',
title='The paired analysis works on the difference between '
'records in one class and other records, where the '
'records are "paired" by some identity condition. The '
'"pair on" label is used to pair all the records with '
'equal values for that field.')],
html_for='pair-on'),
dcc.Dropdown(id='pair-on', options=label_options, multi=True)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Pair with label(s)',
html.Abbr('\uFE56',
title='The "pair with" condition applies to the records to be '
'subtracted from the others')],
html_for='pair-with'),
dcc.Dropdown(id='pair-with', options=label_options, multi=True)
]
)
]
),
dbc.Col(
[
dbc.FormGroup(
[
dbc.Label(['Pair with conditions',
html.Abbr('\uFE56',
title='The condition which must apply for the records which '
'will be subtracted.')],
html_for='pair-with-value'),
dcc.Dropdown(id='pair-with-value', options=[], multi=True)
]
)
]
),
]
),
dbc.FormGroup(
[
dcc.Loading(
[
dbc.Button('Run OPLS', id='opls-button', color='primary',
className='btn btn-block form-control'),
html.Small('', id='message', className='form-text')
]
)
]
)
]
)
def get_results_form():
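    """Build the Results card: a tab bar plus a loading body keyed by the
    'results-content' id."""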
# check if results are loaded
return [
dbc.CardHeader(
[
dbc.Tabs(
[
dbc.Tab(label='Summary', tab_id='summary-tab'),
dbc.Tab(label='Quality Metrics', tab_id='quality-tab'),
dbc.Tab(label='Metric Permutation Tests', tab_id='kde-tab'),
dbc.Tab(label='Feature Permutation Tests', tab_id='feature-significance-tab')
], id='results-tabs', active_tab='summary-tab', card=True
),
]
),
dcc.Loading(dbc.CardBody(id='results-content'))
]
def get_layout():
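    """Build the top-level page layout with the OPLS Options, Load/Export
    Results and Results tabs."""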
return html.Div(
[
html.Br(),
dbc.Container(
[
html.H2('Orthogonal Projection to Latent Structures'),
dbc.Tabs(
[
dbc.Tab(dbc.Card(dbc.CardBody(get_opls_options_form())),
id='opls-options-tab', label='OPLS Options'),
dbc.Tab(dbc.Card(dbc.CardBody(get_load_results_form())),
id='save-results-tab', label='Load/Export Results'),
dbc.Tab(dbc.Card(get_results_form()),
id='results-tab', label='Results')
], id='tabs'
)
]
),
html.Div('in', id='units-history', style={'display': 'none'}),
html.Div(100, id='dpi-history', style={'display': 'none'})
]
)
|
jnns/wagtail | refs/heads/master | wagtail/wagtailcore/middleware.py | 47 | from wagtail.wagtailcore.models import Site
class SiteMiddleware(object):
def process_request(self, request):
"""
Set request.site to contain the Site object responsible for handling this request,
according to hostname matching rules
"""
try:
request.site = Site.find_for_request(request)
except Site.DoesNotExist:
request.site = None
|
jlspyaozhongkai/Uter | refs/heads/master | third_party_build/Python-2.7.9/lib/python2.7/test/test_threading_local.py | 96 | import unittest
from doctest import DocTestSuite
from test import test_support
import weakref
import gc
import sys
# Modules under test
_thread = test_support.import_module('thread')
threading = test_support.import_module('threading')
import _threading_local
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class BaseLocalTest:
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = self._local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
gc.collect()
self.assertEqual(len(weaklist), n)
# XXX _threading_local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n))
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a threads switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(self._local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
threads= []
for i in range(10):
t = threading.Thread(target=f, args=(i,))
t.start()
threads.append(t)
for t in threads:
t.join()
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = [False]
e1 = threading.Event()
e2 = threading.Event()
def f():
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed[0] = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed[0])
def test_arguments(self):
# Issue 1522237
from thread import _local as local
from _threading_local import local as py_local
for cls in (local, py_local):
class MyLocal(cls):
def __init__(self, *args, **kwargs):
pass
MyLocal(a=1)
MyLocal(1)
self.assertRaises(TypeError, cls, a=1)
self.assertRaises(TypeError, cls, 1)
def _test_one_class(self, c):
self._failed = "No error message set or cleared."
obj = c()
e1 = threading.Event()
e2 = threading.Event()
def f1():
obj.x = 'foo'
obj.y = 'bar'
del obj.y
e1.set()
e2.wait()
def f2():
try:
foo = obj.x
except AttributeError:
# This is expected -- we haven't set obj.x in this thread yet!
self._failed = "" # passed
else:
self._failed = ('Incorrectly got value %r from class %r\n' %
(foo, c))
sys.stderr.write(self._failed)
t1 = threading.Thread(target=f1)
t1.start()
e1.wait()
t2 = threading.Thread(target=f2)
t2.start()
t2.join()
# The test is done; just let t1 know it can exit, and wait for it.
e2.set()
t1.join()
self.assertFalse(self._failed, self._failed)
def test_threading_local(self):
self._test_one_class(self._local)
def test_threading_local_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_one_class(LocalSubclass)
def _test_dict_attribute(self, cls):
obj = cls()
obj.x = 5
self.assertEqual(obj.__dict__, {'x': 5})
with self.assertRaises(AttributeError):
obj.__dict__ = {}
with self.assertRaises(AttributeError):
del obj.__dict__
def test_dict_attribute(self):
self._test_dict_attribute(self._local)
def test_dict_attribute_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_dict_attribute(LocalSubclass)
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
_local = _thread._local
# Fails for the pure Python implementation
def test_cycle_collection(self):
class X:
pass
x = X()
x.local = self._local()
x.local.x = x
wr = weakref.ref(x)
del x
gc.collect()
self.assertIs(wr(), None)
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
_local = _threading_local.local
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadLocalTest))
suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
try:
from thread import _local
except ImportError:
pass
else:
import _threading_local
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
test_support.run_unittest(suite)
if __name__ == '__main__':
test_main()
|
sekikn/bigtop | refs/heads/master | bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py | 10 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import os
import re
import unittest
import yaml
class TestBundle(unittest.TestCase):
bundle_file = os.path.join(os.path.dirname(__file__), '..', 'bundle.yaml')
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
with open(cls.bundle_file) as f:
bun = f.read()
bundle = yaml.safe_load(bun)
# NB: strip machine ('to') placement. We don't seem to be guaranteed
# the same machine numbering after the initial bundletester deployment,
# so we might fail when redeploying --to a specific machine to run
# these bundle tests. This is ok because all charms in this bundle are
# using 'reset: false', so we'll already have our deployment just the
# way we want it by the time this test runs. This was originally
# raised as:
# https://github.com/juju/amulet/issues/148
for service, service_config in bundle['services'].items():
if 'to' in service_config:
del service_config['to']
cls.d.load(bundle)
cls.d.setup(timeout=3600)
# we need units reporting ready before we attempt our smoke tests
cls.d.sentry.wait_for_messages({'client': re.compile('ready'),
'namenode': re.compile('ready'),
'resourcemanager': re.compile('ready'),
'slave': re.compile('ready'),
'hbase': re.compile('ready'),
'zookeeper': re.compile('ready'),
}, timeout=3600)
cls.hdfs = cls.d.sentry['namenode'][0]
cls.yarn = cls.d.sentry['resourcemanager'][0]
cls.slave = cls.d.sentry['slave'][0]
cls.hbase = cls.d.sentry['hbase'][0]
def test_components(self):
"""
Confirm that all of the required components are up and running.
"""
hdfs, retcode = self.hdfs.run("pgrep -a java")
yarn, retcode = self.yarn.run("pgrep -a java")
slave, retcode = self.slave.run("pgrep -a java")
hbase, retcode = self.hbase.run("pgrep -a java")
assert 'NameNode' in hdfs, "NameNode not started"
assert 'NameNode' not in slave, "NameNode should not be running on slave"
assert 'ResourceManager' in yarn, "ResourceManager not started"
assert 'ResourceManager' not in slave, "ResourceManager should not be running on slave"
assert 'JobHistoryServer' in yarn, "JobHistoryServer not started"
assert 'JobHistoryServer' not in slave, "JobHistoryServer should not be running on slave"
assert 'NodeManager' in slave, "NodeManager not started"
assert 'NodeManager' not in yarn, "NodeManager should not be running on resourcemanager"
assert 'NodeManager' not in hdfs, "NodeManager should not be running on namenode"
assert 'DataNode' in slave, "DataNode not started"
assert 'DataNode' not in yarn, "DataNode should not be running on resourcemanager"
assert 'DataNode' not in hdfs, "DataNode should not be running on namenode"
assert 'Master' in hbase, "HBase Master not started"
def test_hdfs(self):
"""
Validates mkdir, ls, chmod, and rm HDFS operations.
"""
uuid = self.hdfs.run_action('smoke-test')
result = self.d.action_fetch(uuid, timeout=600, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('HDFS smoke-test did not complete: %s' % result)
def test_yarn(self):
"""
Validates YARN using the Bigtop 'yarn' smoke test.
"""
uuid = self.yarn.run_action('smoke-test')
# 'yarn' smoke takes a while (bigtop tests download lots of stuff)
result = self.d.action_fetch(uuid, timeout=1800, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('YARN smoke-test did not complete: %s' % result)
def test_hbase(self):
"""
Validates HBase with a simple smoke test.
"""
uuid = self.hbase.run_action('smoke-test')
result = self.d.action_fetch(uuid, timeout=600, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('HBase smoke-test did not complete: %s' % result)
@unittest.skip(
'Skipping slave smoke tests; they are too inconsistent and long running for CWR.')
def test_slave(self):
"""
Validates slave using the Bigtop 'hdfs' and 'mapred' smoke test.
"""
uuid = self.slave.run_action('smoke-test')
# 'hdfs+mapred' smoke takes a long while (bigtop tests are slow)
result = self.d.action_fetch(uuid, timeout=3600, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('Slave smoke-test did not complete: %s' % result)
if __name__ == '__main__':
unittest.main()
|
timdiels/chicken_turtle_util | refs/heads/master | pytil/hashlib.py | 1 | # Copyright (C) 2016 VIB/BEG/UGent - Tim Diels <[email protected]>
#
# This file is part of pytil.
#
# pytil is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pytil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pytil. If not, see <http://www.gnu.org/licenses/>.
'''
`python:hashlib` extensions.
'''
import base64
def base85_digest(hash_):
'''
Get base 85 encoded digest of hash.
Parameters
----------
hash_ : hash object
E.g. as returned by ``hashlib.sha512()``.
Returns
-------
str
Base 85 encoded digest.
'''
return base64.b85encode(hash_.digest()).decode('ascii')
|
jn0/fb2utils | refs/heads/master | unidecode/x72.py | 252 | data = (
'He ', # 0x00
'Lan ', # 0x01
'Biao ', # 0x02
'Rong ', # 0x03
'Li ', # 0x04
'Mo ', # 0x05
'Bao ', # 0x06
'Ruo ', # 0x07
'Lu ', # 0x08
'La ', # 0x09
'Ao ', # 0x0a
'Xun ', # 0x0b
'Kuang ', # 0x0c
'Shuo ', # 0x0d
'[?] ', # 0x0e
'Li ', # 0x0f
'Lu ', # 0x10
'Jue ', # 0x11
'Liao ', # 0x12
'Yan ', # 0x13
'Xi ', # 0x14
'Xie ', # 0x15
'Long ', # 0x16
'Ye ', # 0x17
'[?] ', # 0x18
'Rang ', # 0x19
'Yue ', # 0x1a
'Lan ', # 0x1b
'Cong ', # 0x1c
'Jue ', # 0x1d
'Tong ', # 0x1e
'Guan ', # 0x1f
'[?] ', # 0x20
'Che ', # 0x21
'Mi ', # 0x22
'Tang ', # 0x23
'Lan ', # 0x24
'Zhu ', # 0x25
'[?] ', # 0x26
'Ling ', # 0x27
'Cuan ', # 0x28
'Yu ', # 0x29
'Zhua ', # 0x2a
'Tsumekanmuri ', # 0x2b
'Pa ', # 0x2c
'Zheng ', # 0x2d
'Pao ', # 0x2e
'Cheng ', # 0x2f
'Yuan ', # 0x30
'Ai ', # 0x31
'Wei ', # 0x32
'[?] ', # 0x33
'Jue ', # 0x34
'Jue ', # 0x35
'Fu ', # 0x36
'Ye ', # 0x37
'Ba ', # 0x38
'Die ', # 0x39
'Ye ', # 0x3a
'Yao ', # 0x3b
'Zu ', # 0x3c
'Shuang ', # 0x3d
'Er ', # 0x3e
'Qiang ', # 0x3f
'Chuang ', # 0x40
'Ge ', # 0x41
'Zang ', # 0x42
'Die ', # 0x43
'Qiang ', # 0x44
'Yong ', # 0x45
'Qiang ', # 0x46
'Pian ', # 0x47
'Ban ', # 0x48
'Pan ', # 0x49
'Shao ', # 0x4a
'Jian ', # 0x4b
'Pai ', # 0x4c
'Du ', # 0x4d
'Chuang ', # 0x4e
'Tou ', # 0x4f
'Zha ', # 0x50
'Bian ', # 0x51
'Die ', # 0x52
'Bang ', # 0x53
'Bo ', # 0x54
'Chuang ', # 0x55
'You ', # 0x56
'[?] ', # 0x57
'Du ', # 0x58
'Ya ', # 0x59
'Cheng ', # 0x5a
'Niu ', # 0x5b
'Ushihen ', # 0x5c
'Pin ', # 0x5d
'Jiu ', # 0x5e
'Mou ', # 0x5f
'Tuo ', # 0x60
'Mu ', # 0x61
'Lao ', # 0x62
'Ren ', # 0x63
'Mang ', # 0x64
'Fang ', # 0x65
'Mao ', # 0x66
'Mu ', # 0x67
'Gang ', # 0x68
'Wu ', # 0x69
'Yan ', # 0x6a
'Ge ', # 0x6b
'Bei ', # 0x6c
'Si ', # 0x6d
'Jian ', # 0x6e
'Gu ', # 0x6f
'You ', # 0x70
'Ge ', # 0x71
'Sheng ', # 0x72
'Mu ', # 0x73
'Di ', # 0x74
'Qian ', # 0x75
'Quan ', # 0x76
'Quan ', # 0x77
'Zi ', # 0x78
'Te ', # 0x79
'Xi ', # 0x7a
'Mang ', # 0x7b
'Keng ', # 0x7c
'Qian ', # 0x7d
'Wu ', # 0x7e
'Gu ', # 0x7f
'Xi ', # 0x80
'Li ', # 0x81
'Li ', # 0x82
'Pou ', # 0x83
'Ji ', # 0x84
'Gang ', # 0x85
'Zhi ', # 0x86
'Ben ', # 0x87
'Quan ', # 0x88
'Run ', # 0x89
'Du ', # 0x8a
'Ju ', # 0x8b
'Jia ', # 0x8c
'Jian ', # 0x8d
'Feng ', # 0x8e
'Pian ', # 0x8f
'Ke ', # 0x90
'Ju ', # 0x91
'Kao ', # 0x92
'Chu ', # 0x93
'Xi ', # 0x94
'Bei ', # 0x95
'Luo ', # 0x96
'Jie ', # 0x97
'Ma ', # 0x98
'San ', # 0x99
'Wei ', # 0x9a
'Li ', # 0x9b
'Dun ', # 0x9c
'Tong ', # 0x9d
'[?] ', # 0x9e
'Jiang ', # 0x9f
'Ikenie ', # 0xa0
'Li ', # 0xa1
'Du ', # 0xa2
'Lie ', # 0xa3
'Pi ', # 0xa4
'Piao ', # 0xa5
'Bao ', # 0xa6
'Xi ', # 0xa7
'Chou ', # 0xa8
'Wei ', # 0xa9
'Kui ', # 0xaa
'Chou ', # 0xab
'Quan ', # 0xac
'Fan ', # 0xad
'Ba ', # 0xae
'Fan ', # 0xaf
'Qiu ', # 0xb0
'Ji ', # 0xb1
'Cai ', # 0xb2
'Chuo ', # 0xb3
'An ', # 0xb4
'Jie ', # 0xb5
'Zhuang ', # 0xb6
'Guang ', # 0xb7
'Ma ', # 0xb8
'You ', # 0xb9
'Kang ', # 0xba
'Bo ', # 0xbb
'Hou ', # 0xbc
'Ya ', # 0xbd
'Yin ', # 0xbe
'Huan ', # 0xbf
'Zhuang ', # 0xc0
'Yun ', # 0xc1
'Kuang ', # 0xc2
'Niu ', # 0xc3
'Di ', # 0xc4
'Qing ', # 0xc5
'Zhong ', # 0xc6
'Mu ', # 0xc7
'Bei ', # 0xc8
'Pi ', # 0xc9
'Ju ', # 0xca
'Ni ', # 0xcb
'Sheng ', # 0xcc
'Pao ', # 0xcd
'Xia ', # 0xce
'Tuo ', # 0xcf
'Hu ', # 0xd0
'Ling ', # 0xd1
'Fei ', # 0xd2
'Pi ', # 0xd3
'Ni ', # 0xd4
'Ao ', # 0xd5
'You ', # 0xd6
'Gou ', # 0xd7
'Yue ', # 0xd8
'Ju ', # 0xd9
'Dan ', # 0xda
'Po ', # 0xdb
'Gu ', # 0xdc
'Xian ', # 0xdd
'Ning ', # 0xde
'Huan ', # 0xdf
'Hen ', # 0xe0
'Jiao ', # 0xe1
'He ', # 0xe2
'Zhao ', # 0xe3
'Ji ', # 0xe4
'Xun ', # 0xe5
'Shan ', # 0xe6
'Ta ', # 0xe7
'Rong ', # 0xe8
'Shou ', # 0xe9
'Tong ', # 0xea
'Lao ', # 0xeb
'Du ', # 0xec
'Xia ', # 0xed
'Shi ', # 0xee
'Hua ', # 0xef
'Zheng ', # 0xf0
'Yu ', # 0xf1
'Sun ', # 0xf2
'Yu ', # 0xf3
'Bi ', # 0xf4
'Mang ', # 0xf5
'Xi ', # 0xf6
'Juan ', # 0xf7
'Li ', # 0xf8
'Xia ', # 0xf9
'Yin ', # 0xfa
'Suan ', # 0xfb
'Lang ', # 0xfc
'Bei ', # 0xfd
'Zhi ', # 0xfe
'Yan ', # 0xff
)
|
Bjarne-AAU/MonteCarloLocalization | refs/heads/master | World.py | 1 |
import pygame
import numpy as np
from scipy import interpolate
from scipy.ndimage.filters import gaussian_filter
import cv2
import noise
from matplotlib import cm
from matplotlib.colors import Colormap
mapping = np.array([0.00, 0.05, 0.10, 0.15, 0.20, 0.45, 0.70, 0.90, 0.95, 0.97, 0.99, 1.00])
def world_type_from_colormap(cmap, mapping = mapping):
x = np.linspace(0,1,len(mapping))
res = np.roll(cmap(x), 1, -1)
res[:,0] = mapping
return res
def create_palette(colors):
if colors is None:
c = np.linspace(0, 1, 256)
cm = np.vstack([c, c, c]).T
elif isinstance(colors, Colormap):
cm = colors(np.linspace(0, 1, 256))
else:
c = interpolate.interp1d(colors[:,0], colors[:,1:], axis=0)
cm = c(np.linspace(0, 1, 256))
return (cm*255).astype(np.int32)
class WORLD_TYPE(object):
@staticmethod
def get(name):
if name not in WORLD_TYPE.__dict__: return None
return WORLD_TYPE.__dict__[name]
NONE = np.array([[0.0, 0.3, 0.6, 0.2], [1.0, 0.3, 0.6, 0.2]])
GREY_RAW = cm.gray
TERRAIN_RAW = cm.terrain
OCEAN_RAW = cm.ocean
EARTH_RAW = cm.gist_earth
MARS_RAW = cm.Set1
GREY = world_type_from_colormap(cm.gray)
TERRAIN = world_type_from_colormap(cm.terrain)
OCEAN = world_type_from_colormap(cm.ocean)
EARTH = world_type_from_colormap(cm.gist_earth)
MARS = world_type_from_colormap(cm.Set1)
MY_EARTH = np.array([
[0.00, 0.00, 0.00, 0.40], # base
[0.05, 0.00, 0.00, 0.70], # water
[0.10, 0.20, 0.40, 0.80], # shallow water
[0.15, 0.70, 0.65, 0.45], # beach
[0.20, 0.10, 0.50, 0.10], # bushes
# [0.45, 0.40, 0.90, 0.20], # grass
[0.45, 0.30, 0.70, 0.20], # grass
# [0.50, 0.50, 0.60, 0.20], # savanna
[0.70, 0.40, 0.90, 0.20], # grass
[0.90, 0.00, 0.50, 0.10], # forest
[0.95, 0.40, 0.70, 0.20], # grass
[0.97, 0.50, 0.50, 0.50], # rock
[0.99, 0.40, 0.40, 0.40], # rock
[1.00, 1.00, 1.00, 1.00] # snow
])
MY_MARS = np.array([
[0.00, 0.20, 0.00, 0.00], # base
[0.05, 0.40, 0.20, 0.20], # water
# [0.10, 0.50, 0.30, 0.10], # shallow water
[0.35, 0.80, 0.50, 0.20], # shallow water
[0.60, 0.60, 0.40, 0.30], # shallow water
[0.90, 0.90, 0.60, 0.50], # snow
[0.95, 0.90, 0.60, 0.50], # snow
[1.00, 0.90, 0.70, 0.60] # snow
])
class Generator(object):
def __init__(self, width, height, scale_x = 6, scale_y = 6, octaves = 6, seed = 0):
self.width = width
self.height = height
self.scale_x = scale_x
self.scale_y = scale_y
self.octaves = octaves
self.seed = seed
@property
def size(self): return np.array([self.width, self.height])
@size.setter
def size(self, size): self.width, self.height = size[0], size[1]
def _eval_at(self, x, y):
raise NotImplementedError("Use a specialized generator")
def at(self, points):
points = points.astype(float)  # the np.float alias is deprecated; plain float is equivalent
points[:,0] *= self.scale_x / float(self.width)
points[:,1] *= self.scale_y / float(self.height)
res = np.array([self._eval_at(x, y) for x,y in points])
res = (res + 1)/2.0
return res
def create(self, area, width, height):
Xs = np.linspace(area.left, area.right, width+1)
Ys = np.linspace(area.top, area.bottom, height+1)
Y, X = np.meshgrid(Ys, Xs)
points = np.vstack([X.ravel(), Y.ravel()]).T
return np.array(self.at(points)).reshape([width+1, height+1])[0:-1,0:-1]
class GeneratorPerlin(Generator):
def _eval_at(self, x, y):
return noise.pnoise2(x, y, self.octaves, repeatx=self.scale_x, repeaty=self.scale_y, base=self.seed, persistence=0.4, lacunarity=3.3)
class GeneratorSimplex(Generator):
def _eval_at(self, x, y):
return noise.snoise2(x, y, self.octaves, repeatx=self.scale_x, repeaty=self.scale_y, base=self.seed, persistence=0.4, lacunarity=3.3)
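# Note: pnoise2/snoise2 return raw noise in roughly [-1, 1]; Generator.at
# rescales that to [0, 1] before MapGenerator.create maps it through the
# colour palette.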
class NOISE(object):
PERLIN = 0
SIMPLEX = 1
class MapGenerator(object):
def __init__(self, width, height, scale = 6, seed = 0, colors = None, type = None):
self.width = width
self.height = height
self.seed = seed
self.colors = colors
if type == NOISE.PERLIN:
self._gen = GeneratorPerlin(width, height, scale, scale, 6, seed)
elif type == NOISE.SIMPLEX:
self._gen = GeneratorSimplex(width, height, scale, scale, 6, seed)
else:
self._gen = GeneratorSimplex(width, height, scale, scale, 6, seed)
@property
def size(self): return np.array([self.width, self.height])
def set_colors(self, colors):
self.colors = colors
def create(self, area=None, width=None, height=None):
if area is None: area = pygame.Rect( [0,0], self.size)
if width is None: width = self.width
if height is None: height = self.height
genmap = pygame.Surface([width, height], 0, 8)
hmap = self._gen.create(area, width, height)
hmap = (hmap*255).astype(np.uint8)
cv2.equalizeHist(hmap, hmap)
palette = create_palette(self.colors)
genmap.set_palette(palette)
pygame.surfarray.blit_array(genmap, hmap)
return genmap
class ExtendedMapGenerator(MapGenerator):
def __init__(self, width, height, pad_x = 0, pad_y = 0, scale = 6, seed = 0, colors = None, type = None):
super(ExtendedMapGenerator, self).__init__(width, height, scale, seed, colors, type)
self.pad_x = pad_x
self.pad_y = pad_y
@property
def pad(self):
return np.array([self.pad_x, self.pad_y])
@property
def width_ext(self):
return self.width + 2*self.pad_x
@property
def height_ext(self):
return self.height + 2*self.pad_y
@property
def size_ext(self):
return self.size + 2*self.pad
def create(self, area=None, width=None, height=None):
if area is None: area = pygame.Rect(-self.pad, self.size_ext)
if width is None: width = self.width_ext
if height is None: height = self.height_ext
return super(ExtendedMapGenerator, self).create(area, width, height)
class WorldMap(object):
def __init__(self, width, height, pad_x = 0, pad_y = 0, scale = 6, seed = 0, colors = None, type = None):
self._mapper = ExtendedMapGenerator(width, height, pad_x, pad_y, scale, seed, colors, type)
self._map = self._mapper.create()
@property
def width(self): return self._mapper.width
@property
def height(self): return self._mapper.height
@property
def size(self): return self._mapper.size
def map(self, roi = None, size = None):
if roi is None: roi = [0, 0, self._mapper.width, self._mapper.height]
roi[0] += self._mapper.pad_x
roi[1] += self._mapper.pad_y
if size is None:
rect = pygame.Rect( (roi[0], roi[1]), (roi[2], roi[3]) )
submap = self._map.subsurface(rect)
submap = submap.copy()
else:
rect = pygame.Rect( (roi[0]-1, roi[1]-1), (roi[2]+2, roi[3]+2) )
submap = self._map.subsurface(rect)
scale_x = float(size[0]) / roi[2]
scale_y = float(size[1]) / roi[3]
submap = pygame.transform.scale(submap, [int(size[0] + 2*scale_x), int(size[1] + 2*scale_y)])
subpix_x, _ = np.modf(roi[0])
subpix_y, _ = np.modf(roi[1])
dx = int(np.round(-subpix_x * scale_x))
dy = int(np.round(-subpix_y * scale_y))
submap.scroll(dx, dy)
view_m = pygame.surfarray.pixels2d(submap)
gaussian_filter(view_m, sigma=(scale_x/3.0, scale_y/3.0), output=view_m)
del view_m
return submap
def draw(self, image):
image.blit(self.map(), (0, 0))
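# Illustrative usage (a sketch; the class and constant names exist above, but
# the concrete sizes, seed and scale are made-up values):
#
#     pygame.init()
#     screen = pygame.display.set_mode((400, 300))
#     world = WorldMap(400, 300, pad_x=32, pad_y=32, scale=6, seed=3,
#                      colors=WORLD_TYPE.MY_EARTH, type=NOISE.SIMPLEX)
#     world.draw(screen)
#     pygame.display.flip()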
|
sramsay64/python-iview | refs/heads/master | cherrypy/test/test_request_obj.py | 5 | """Basic tests for the cherrypy.Request object."""
import os
localDir = os.path.dirname(__file__)
import sys
import types
from cherrypy._cpcompat import IncompleteRead, ntob, ntou, unicodestr
import cherrypy
from cherrypy import _cptools, tools
from cherrypy.lib import httputil
defined_http_methods = ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "PROPFIND")
# Client-side code #
from cherrypy.test import helper
class RequestObjectTests(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return "hello"
index.exposed = True
def scheme(self):
return cherrypy.request.scheme
scheme.exposed = True
root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each
subclass, and adds an instance of the subclass as an attribute
of root.
"""
def __init__(cls, name, bases, dct):
type.__init__(cls, name, bases, dct)
for value in dct.values():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(root, name.lower(), cls())
Test = TestType('Test', (object,), {})
class PathInfo(Test):
def default(self, *args):
return cherrypy.request.path_info
class Params(Test):
def index(self, thing):
return repr(thing)
def ismap(self, x, y):
return "Coordinates: %s, %s" % (x, y)
def default(self, *args, **kwargs):
return "args: %s kwargs: %s" % (args, sorted(kwargs.items()))
default._cp_config = {'request.query_string_encoding': 'latin1'}
class ParamErrorsCallable(object):
exposed = True
def __call__(self):
return "data"
class ParamErrors(Test):
def one_positional(self, param1):
return "data"
one_positional.exposed = True
def one_positional_args(self, param1, *args):
return "data"
one_positional_args.exposed = True
def one_positional_args_kwargs(self, param1, *args, **kwargs):
return "data"
one_positional_args_kwargs.exposed = True
def one_positional_kwargs(self, param1, **kwargs):
return "data"
one_positional_kwargs.exposed = True
def no_positional(self):
return "data"
no_positional.exposed = True
def no_positional_args(self, *args):
return "data"
no_positional_args.exposed = True
def no_positional_args_kwargs(self, *args, **kwargs):
return "data"
no_positional_args_kwargs.exposed = True
def no_positional_kwargs(self, **kwargs):
return "data"
no_positional_kwargs.exposed = True
callable_object = ParamErrorsCallable()
def raise_type_error(self, **kwargs):
raise TypeError("Client Error")
raise_type_error.exposed = True
def raise_type_error_with_default_param(self, x, y=None):
return '%d' % 'a' # throw an exception
raise_type_error_with_default_param.exposed = True
def callable_error_page(status, **kwargs):
return "Error %s - Well, I'm very sorry but you haven't paid!" % (
status)
class Error(Test):
_cp_config = {'tools.log_tracebacks.on': True,
}
def reason_phrase(self):
raise cherrypy.HTTPError("410 Gone fishin'")
def custom(self, err='404'):
raise cherrypy.HTTPError(
int(err), "No, <b>really</b>, not found!")
custom._cp_config = {
'error_page.404': os.path.join(localDir, "static/index.html"),
'error_page.401': callable_error_page,
}
def custom_default(self):
return 1 + 'a' # raise an unexpected error
custom_default._cp_config = {
'error_page.default': callable_error_page}
def noexist(self):
raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
noexist._cp_config = {'error_page.404': "nonexistent.html"}
def page_method(self):
raise ValueError()
def page_yield(self):
yield "howdy"
raise ValueError()
def page_streamed(self):
yield "word up"
raise ValueError()
yield "very oops"
page_streamed._cp_config = {"response.stream": True}
def cause_err_in_finalize(self):
# Since status must start with an int, this should error.
cherrypy.response.status = "ZOO OK"
cause_err_in_finalize._cp_config = {
'request.show_tracebacks': False}
def rethrow(self):
"""Test that an error raised here will be thrown out to
the server.
"""
raise ValueError()
rethrow._cp_config = {'request.throw_errors': True}
class Expect(Test):
def expectation_failed(self):
expect = cherrypy.request.headers.elements("Expect")
if expect and expect[0].value != '100-continue':
raise cherrypy.HTTPError(400)
raise cherrypy.HTTPError(417, 'Expectation Failed')
class Headers(Test):
def default(self, headername):
"""Spit back out the value for the requested header."""
return cherrypy.request.headers[headername]
def doubledheaders(self):
# From https://bitbucket.org/cherrypy/cherrypy/issue/165:
# "header field names should not be case sensitive sayes the
# rfc. if i set a headerfield in complete lowercase i end up
# with two header fields, one in lowercase, the other in
# mixed-case."
# Set the most common headers
hMap = cherrypy.response.headers
hMap['content-type'] = "text/html"
hMap['content-length'] = 18
hMap['server'] = 'CherryPy headertest'
hMap['location'] = ('%s://%s:%s/headers/'
% (cherrypy.request.local.ip,
cherrypy.request.local.port,
cherrypy.request.scheme))
# Set a rare header for fun
hMap['Expires'] = 'Thu, 01 Dec 2194 16:00:00 GMT'
return "double header test"
def ifmatch(self):
val = cherrypy.request.headers['If-Match']
assert isinstance(val, unicodestr)
cherrypy.response.headers['ETag'] = val
return val
class HeaderElements(Test):
def get_elements(self, headername):
e = cherrypy.request.headers.elements(headername)
return "\n".join([unicodestr(x) for x in e])
class Method(Test):
def index(self):
m = cherrypy.request.method
if m in defined_http_methods or m == "CONNECT":
return m
if m == "LINK":
raise cherrypy.HTTPError(405)
else:
raise cherrypy.HTTPError(501)
def parameterized(self, data):
return data
def request_body(self):
# This should be a file object (temp file),
# which CP will just pipe back out if we tell it to.
return cherrypy.request.body
def reachable(self):
return "success"
class Divorce:
"""HTTP Method handlers shouldn't collide with normal method names.
For example, a GET-handler shouldn't collide with a method named
'get'.
If you build HTTP method dispatching into CherryPy, rewrite this
class to use your new dispatch mechanism and make sure that:
"GET /divorce HTTP/1.1" maps to divorce.index() and
"GET /divorce/get?ID=13 HTTP/1.1" maps to divorce.get()
"""
documents = {}
def index(self):
yield "<h1>Choose your document</h1>\n"
yield "<ul>\n"
for id, contents in self.documents.items():
yield (
" <li><a href='/divorce/get?ID=%s'>%s</a>:"
" %s</li>\n" % (id, id, contents))
yield "</ul>"
index.exposed = True
def get(self, ID):
return ("Divorce document %s: %s" %
(ID, self.documents.get(ID, "empty")))
get.exposed = True
root.divorce = Divorce()
class ThreadLocal(Test):
def index(self):
existing = repr(getattr(cherrypy.request, "asdf", None))
cherrypy.request.asdf = "rassfrassin"
return existing
appconf = {
'/method': {
'request.methods_with_bodies': ("POST", "PUT", "PROPFIND")
},
}
cherrypy.tree.mount(root, config=appconf)
setup_server = staticmethod(setup_server)
def test_scheme(self):
self.getPage("/scheme")
self.assertBody(self.scheme)
def testRelativeURIPathInfo(self):
self.getPage("/pathinfo/foo/bar")
self.assertBody("/pathinfo/foo/bar")
def testAbsoluteURIPathInfo(self):
# http://cherrypy.org/ticket/1061
self.getPage("http://localhost/pathinfo/foo/bar")
self.assertBody("/pathinfo/foo/bar")
def testParams(self):
self.getPage("/params/?thing=a")
self.assertBody(repr(ntou("a")))
self.getPage("/params/?thing=a&thing=b&thing=c")
self.assertBody(repr([ntou('a'), ntou('b'), ntou('c')]))
# Test friendly error message when given params are not accepted.
cherrypy.config.update({"request.show_mismatched_params": True})
self.getPage("/params/?notathing=meeting")
self.assertInBody("Missing parameters: thing")
self.getPage("/params/?thing=meeting¬athing=meeting")
self.assertInBody("Unexpected query string parameters: notathing")
# Test ability to turn off friendly error messages
cherrypy.config.update({"request.show_mismatched_params": False})
self.getPage("/params/?notathing=meeting")
self.assertInBody("Not Found")
self.getPage("/params/?thing=meeting¬athing=meeting")
self.assertInBody("Not Found")
# Test "% HEX HEX"-encoded URL, param keys, and values
self.getPage("/params/%d4%20%e3/cheese?Gruy%E8re=Bulgn%e9ville")
self.assertBody("args: %s kwargs: %s" %
(('\xd4 \xe3', 'cheese'),
[('Gruy\xe8re', ntou('Bulgn\xe9ville'))]))
# Make sure that encoded = and & get parsed correctly
self.getPage(
"/params/code?url=http%3A//cherrypy.org/index%3Fa%3D1%26b%3D2")
self.assertBody("args: %s kwargs: %s" %
(('code',),
[('url', ntou('http://cherrypy.org/index?a=1&b=2'))]))
# Test coordinates sent by <img ismap>
self.getPage("/params/ismap?223,114")
self.assertBody("Coordinates: 223, 114")
# Test "name[key]" dict-like params
self.getPage("/params/dictlike?a[1]=1&a[2]=2&b=foo&b[bar]=baz")
self.assertBody("args: %s kwargs: %s" %
(('dictlike',),
[('a[1]', ntou('1')), ('a[2]', ntou('2')),
('b', ntou('foo')), ('b[bar]', ntou('baz'))]))
def testParamErrors(self):
# test that all of the handlers work when given
# the correct parameters in order to ensure that the
# errors below aren't coming from some other source.
for uri in (
'/paramerrors/one_positional?param1=foo',
'/paramerrors/one_positional_args?param1=foo',
'/paramerrors/one_positional_args/foo',
'/paramerrors/one_positional_args/foo/bar/baz',
'/paramerrors/one_positional_args_kwargs?'
'param1=foo&param2=bar',
'/paramerrors/one_positional_args_kwargs/foo?'
'param2=bar&param3=baz',
'/paramerrors/one_positional_args_kwargs/foo/bar/baz?'
'param2=bar&param3=baz',
'/paramerrors/one_positional_kwargs?'
'param1=foo&param2=bar&param3=baz',
'/paramerrors/one_positional_kwargs/foo?'
'param4=foo&param2=bar&param3=baz',
'/paramerrors/no_positional',
'/paramerrors/no_positional_args/foo',
'/paramerrors/no_positional_args/foo/bar/baz',
'/paramerrors/no_positional_args_kwargs?param1=foo&param2=bar',
'/paramerrors/no_positional_args_kwargs/foo?param2=bar',
'/paramerrors/no_positional_args_kwargs/foo/bar/baz?'
'param2=bar&param3=baz',
'/paramerrors/no_positional_kwargs?param1=foo&param2=bar',
'/paramerrors/callable_object',
):
self.getPage(uri)
self.assertStatus(200)
error_msgs = [
'Missing parameters',
'Nothing matches the given URI',
'Multiple values for parameters',
'Unexpected query string parameters',
'Unexpected body parameters',
'Invalid path in Request-URI',
'Illegal #fragment in Request-URI',
]
# uri should be tested for valid absolute path, the status must be 400.
for uri, error_idx in (
('invalid/path/without/leading/slash', 5),
('/valid/path#invalid=fragment', 6),
):
self.getPage(uri)
self.assertStatus(400)
self.assertInBody(error_msgs[error_idx])
# query string parameters are part of the URI, so if they are wrong
# for a particular handler, the status MUST be a 404.
for uri, msg in (
('/paramerrors/one_positional', error_msgs[0]),
('/paramerrors/one_positional?foo=foo', error_msgs[0]),
('/paramerrors/one_positional/foo/bar/baz', error_msgs[1]),
('/paramerrors/one_positional/foo?param1=foo', error_msgs[2]),
('/paramerrors/one_positional/foo?param1=foo&param2=foo',
error_msgs[2]),
('/paramerrors/one_positional_args/foo?param1=foo&param2=foo',
error_msgs[2]),
('/paramerrors/one_positional_args/foo/bar/baz?param2=foo',
error_msgs[3]),
('/paramerrors/one_positional_args_kwargs/foo/bar/baz?'
'param1=bar&param3=baz',
error_msgs[2]),
('/paramerrors/one_positional_kwargs/foo?'
'param1=foo&param2=bar&param3=baz',
error_msgs[2]),
('/paramerrors/no_positional/boo', error_msgs[1]),
('/paramerrors/no_positional?param1=foo', error_msgs[3]),
('/paramerrors/no_positional_args/boo?param1=foo', error_msgs[3]),
('/paramerrors/no_positional_kwargs/boo?param1=foo',
error_msgs[1]),
('/paramerrors/callable_object?param1=foo', error_msgs[3]),
('/paramerrors/callable_object/boo', error_msgs[1]),
):
for show_mismatched_params in (True, False):
cherrypy.config.update(
{'request.show_mismatched_params': show_mismatched_params})
self.getPage(uri)
self.assertStatus(404)
if show_mismatched_params:
self.assertInBody(msg)
else:
self.assertInBody("Not Found")
# if body parameters are wrong, a 400 must be returned.
for uri, body, msg in (
('/paramerrors/one_positional/foo',
'param1=foo', error_msgs[2]),
('/paramerrors/one_positional/foo',
'param1=foo&param2=foo', error_msgs[2]),
('/paramerrors/one_positional_args/foo',
'param1=foo&param2=foo', error_msgs[2]),
('/paramerrors/one_positional_args/foo/bar/baz',
'param2=foo', error_msgs[4]),
('/paramerrors/one_positional_args_kwargs/foo/bar/baz',
'param1=bar&param3=baz', error_msgs[2]),
('/paramerrors/one_positional_kwargs/foo',
'param1=foo&param2=bar&param3=baz', error_msgs[2]),
('/paramerrors/no_positional', 'param1=foo', error_msgs[4]),
('/paramerrors/no_positional_args/boo',
'param1=foo', error_msgs[4]),
('/paramerrors/callable_object', 'param1=foo', error_msgs[4]),
):
for show_mismatched_params in (True, False):
cherrypy.config.update(
{'request.show_mismatched_params': show_mismatched_params})
self.getPage(uri, method='POST', body=body)
self.assertStatus(400)
if show_mismatched_params:
self.assertInBody(msg)
else:
self.assertInBody("400 Bad")
# even if body parameters are wrong, if we get the uri wrong, then
# it's a 404
for uri, body, msg in (
('/paramerrors/one_positional?param2=foo',
'param1=foo', error_msgs[3]),
('/paramerrors/one_positional/foo/bar',
'param2=foo', error_msgs[1]),
('/paramerrors/one_positional_args/foo/bar?param2=foo',
'param3=foo', error_msgs[3]),
('/paramerrors/one_positional_kwargs/foo/bar',
'param2=bar&param3=baz', error_msgs[1]),
('/paramerrors/no_positional?param1=foo',
'param2=foo', error_msgs[3]),
('/paramerrors/no_positional_args/boo?param2=foo',
'param1=foo', error_msgs[3]),
('/paramerrors/callable_object?param2=bar',
'param1=foo', error_msgs[3]),
):
for show_mismatched_params in (True, False):
cherrypy.config.update(
{'request.show_mismatched_params': show_mismatched_params})
self.getPage(uri, method='POST', body=body)
self.assertStatus(404)
if show_mismatched_params:
self.assertInBody(msg)
else:
self.assertInBody("Not Found")
# In the case that a handler raises a TypeError we should
# let that type error through.
for uri in (
'/paramerrors/raise_type_error',
'/paramerrors/raise_type_error_with_default_param?x=0',
'/paramerrors/raise_type_error_with_default_param?x=0&y=0',
):
self.getPage(uri, method='GET')
self.assertStatus(500)
self.assertInBody('Client Error')
def testErrorHandling(self):
self.getPage("/error/missing")
self.assertStatus(404)
self.assertErrorPage(404, "The path '/error/missing' was not found.")
ignore = helper.webtest.ignored_exceptions
ignore.append(ValueError)
try:
valerr = '\n raise ValueError()\nValueError'
self.getPage("/error/page_method")
self.assertErrorPage(500, pattern=valerr)
self.getPage("/error/page_yield")
self.assertErrorPage(500, pattern=valerr)
if (cherrypy.server.protocol_version == "HTTP/1.0" or
getattr(cherrypy.server, "using_apache", False)):
self.getPage("/error/page_streamed")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus(200)
self.assertBody("word up")
else:
# Under HTTP/1.1, the chunked transfer-coding is used.
# The HTTP client will choke when the output is incomplete.
self.assertRaises((ValueError, IncompleteRead), self.getPage,
"/error/page_streamed")
# No traceback should be present
self.getPage("/error/cause_err_in_finalize")
msg = "Illegal response status from server ('ZOO' is non-numeric)."
self.assertErrorPage(500, msg, None)
finally:
ignore.pop()
# Test HTTPError with a reason-phrase in the status arg.
self.getPage('/error/reason_phrase')
self.assertStatus("410 Gone fishin'")
# Test custom error page for a specific error.
self.getPage("/error/custom")
self.assertStatus(404)
self.assertBody("Hello, world\r\n" + (" " * 499))
# Test custom error page for a specific error.
self.getPage("/error/custom?err=401")
self.assertStatus(401)
self.assertBody(
"Error 401 Unauthorized - "
"Well, I'm very sorry but you haven't paid!")
# Test default custom error page.
self.getPage("/error/custom_default")
self.assertStatus(500)
self.assertBody(
"Error 500 Internal Server Error - "
"Well, I'm very sorry but you haven't paid!".ljust(513))
# Test error in custom error page (ticket #305).
# Note that the message is escaped for HTML (ticket #310).
self.getPage("/error/noexist")
self.assertStatus(404)
if sys.version_info >= (3, 3):
exc_name = "FileNotFoundError"
else:
exc_name = "IOError"
msg = ("No, <b>really</b>, not found!<br />"
"In addition, the custom error page failed:\n<br />"
"%s: [Errno 2] "
"No such file or directory: 'nonexistent.html'") % (exc_name,)
self.assertInBody(msg)
if getattr(cherrypy.server, "using_apache", False):
pass
else:
# Test throw_errors (ticket #186).
self.getPage("/error/rethrow")
self.assertInBody("raise ValueError()")
def testExpect(self):
e = ('Expect', '100-continue')
self.getPage("/headerelements/get_elements?headername=Expect", [e])
self.assertBody('100-continue')
self.getPage("/expect/expectation_failed", [e])
self.assertStatus(417)
def testHeaderElements(self):
# Accept-* header elements should be sorted, with most preferred first.
h = [('Accept', 'audio/*; q=0.2, audio/basic')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("audio/basic\n"
"audio/*;q=0.2")
h = [
('Accept',
'text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c')
]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/x-c\n"
"text/html\n"
"text/x-dvi;q=0.8\n"
"text/plain;q=0.5")
# Test that more specific media ranges get priority.
h = [('Accept', 'text/*, text/html, text/html;level=1, */*')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/html;level=1\n"
"text/html\n"
"text/*\n"
"*/*")
# Test Accept-Charset
h = [('Accept-Charset', 'iso-8859-5, unicode-1-1;q=0.8')]
self.getPage(
"/headerelements/get_elements?headername=Accept-Charset", h)
self.assertStatus("200 OK")
self.assertBody("iso-8859-5\n"
"unicode-1-1;q=0.8")
# Test Accept-Encoding
h = [('Accept-Encoding', 'gzip;q=1.0, identity; q=0.5, *;q=0')]
self.getPage(
"/headerelements/get_elements?headername=Accept-Encoding", h)
self.assertStatus("200 OK")
self.assertBody("gzip;q=1.0\n"
"identity;q=0.5\n"
"*;q=0")
# Test Accept-Language
h = [('Accept-Language', 'da, en-gb;q=0.8, en;q=0.7')]
self.getPage(
"/headerelements/get_elements?headername=Accept-Language", h)
self.assertStatus("200 OK")
self.assertBody("da\n"
"en-gb;q=0.8\n"
"en;q=0.7")
# Test malformed header parsing. See
# https://bitbucket.org/cherrypy/cherrypy/issue/763.
self.getPage("/headerelements/get_elements?headername=Content-Type",
# Note the illegal trailing ";"
headers=[('Content-Type', 'text/html; charset=utf-8;')])
self.assertStatus(200)
self.assertBody("text/html;charset=utf-8")
def test_repeated_headers(self):
# Test that two request headers are collapsed into one.
# See https://bitbucket.org/cherrypy/cherrypy/issue/542.
self.getPage("/headers/Accept-Charset",
headers=[("Accept-Charset", "iso-8859-5"),
("Accept-Charset", "unicode-1-1;q=0.8")])
self.assertBody("iso-8859-5, unicode-1-1;q=0.8")
# Tests that each header only appears once, regardless of case.
self.getPage("/headers/doubledheaders")
self.assertBody("double header test")
hnames = [name.title() for name, val in self.headers]
for key in ['Content-Length', 'Content-Type', 'Date',
'Expires', 'Location', 'Server']:
self.assertEqual(hnames.count(key), 1, self.headers)
def test_encoded_headers(self):
# First, make sure the innards work like expected.
self.assertEqual(
httputil.decode_TEXT(ntou("=?utf-8?q?f=C3=BCr?=")), ntou("f\xfcr"))
if cherrypy.server.protocol_version == "HTTP/1.1":
# Test RFC-2047-encoded request and response header values
u = ntou('\u212bngstr\xf6m', 'escape')
c = ntou("=E2=84=ABngstr=C3=B6m")
self.getPage("/headers/ifmatch",
[('If-Match', ntou('=?utf-8?q?%s?=') % c)])
# The body should be utf-8 encoded.
self.assertBody(ntob("\xe2\x84\xabngstr\xc3\xb6m"))
# But the Etag header should be RFC-2047 encoded (binary)
self.assertHeader("ETag", ntou('=?utf-8?b?4oSrbmdzdHLDtm0=?='))
# Test a *LONG* RFC-2047-encoded request and response header value
self.getPage("/headers/ifmatch",
[('If-Match', ntou('=?utf-8?q?%s?=') % (c * 10))])
self.assertBody(ntob("\xe2\x84\xabngstr\xc3\xb6m") * 10)
# Note: this is different output for Python3, but it decodes fine.
etag = self.assertHeader(
"ETag",
'=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt'
'4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt'
'4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt'
'4oSrbmdzdHLDtm0=?=')
self.assertEqual(httputil.decode_TEXT(etag), u * 10)
def test_header_presence(self):
# If we don't pass a Content-Type header, it should not be present
# in cherrypy.request.headers
self.getPage("/headers/Content-Type",
headers=[])
self.assertStatus(500)
# If Content-Type is present in the request, it should be present in
# cherrypy.request.headers
self.getPage("/headers/Content-Type",
headers=[("Content-type", "application/json")])
self.assertBody("application/json")
def test_basic_HTTPMethods(self):
helper.webtest.methods_with_bodies = ("POST", "PUT", "PROPFIND")
# Test that all defined HTTP methods work.
for m in defined_http_methods:
self.getPage("/method/", method=m)
# HEAD requests should not return any body.
if m == "HEAD":
self.assertBody("")
elif m == "TRACE":
# Some HTTP servers (like modpy) have their own TRACE support
self.assertEqual(self.body[:5], ntob("TRACE"))
else:
self.assertBody(m)
# Request a PUT method with a form-urlencoded body
self.getPage("/method/parameterized", method="PUT",
body="data=on+top+of+other+things")
self.assertBody("on top of other things")
# Request a PUT method with a file body
b = "one thing on top of another"
h = [("Content-Type", "text/plain"),
("Content-Length", str(len(b)))]
self.getPage("/method/request_body", headers=h, method="PUT", body=b)
self.assertStatus(200)
self.assertBody(b)
# Request a PUT method with a file body but no Content-Type.
# See https://bitbucket.org/cherrypy/cherrypy/issue/790.
b = ntob("one thing on top of another")
self.persistent = True
try:
conn = self.HTTP_CONN
conn.putrequest("PUT", "/method/request_body", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader('Content-Length', str(len(b)))
conn.endheaders()
conn.send(b)
response = conn.response_class(conn.sock, method="PUT")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(b)
finally:
self.persistent = False
# Request a PUT method with no body whatsoever (not an empty one).
# See https://bitbucket.org/cherrypy/cherrypy/issue/650.
# Provide a C-T or webtest will provide one (and a C-L) for us.
h = [("Content-Type", "text/plain")]
self.getPage("/method/reachable", headers=h, method="PUT")
self.assertStatus(411)
# Request a custom method with a request body
b = ('<?xml version="1.0" encoding="utf-8" ?>\n\n'
'<propfind xmlns="DAV:"><prop><getlastmodified/>'
'</prop></propfind>')
h = [('Content-Type', 'text/xml'),
('Content-Length', str(len(b)))]
self.getPage("/method/request_body", headers=h,
method="PROPFIND", body=b)
self.assertStatus(200)
self.assertBody(b)
# Request a disallowed method
self.getPage("/method/", method="LINK")
self.assertStatus(405)
# Request an unknown method
self.getPage("/method/", method="SEARCH")
self.assertStatus(501)
# For method dispatchers: make sure that an HTTP method doesn't
# collide with a virtual path atom. If you build HTTP-method
# dispatching into the core, rewrite these handlers to use
# your dispatch idioms.
self.getPage("/divorce/get?ID=13")
self.assertBody('Divorce document 13: empty')
self.assertStatus(200)
self.getPage("/divorce/", method="GET")
self.assertBody('<h1>Choose your document</h1>\n<ul>\n</ul>')
self.assertStatus(200)
def test_CONNECT_method(self):
if getattr(cherrypy.server, "using_apache", False):
return self.skip("skipped due to known Apache differences... ")
self.getPage("/method/", method="CONNECT")
self.assertBody("CONNECT")
def testEmptyThreadlocals(self):
results = []
for x in range(20):
self.getPage("/threadlocal/")
results.append(self.body)
self.assertEqual(results, [ntob("None")] * 20)
|
kswiat/django | refs/heads/master | django/conf/locale/eu/formats.py | 611 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'Yeko M\re\n d\a'
TIME_FORMAT = 'H:i'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
SHORT_DATE_FORMAT = 'Y M j'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
Foxfanmedium/python_training | refs/heads/master | Book_courses/Berry_P/chapter_2/vowels7.py | 1 | # vowels = ['a', 'o', 'i', 'e', 'u']
# word = input("Provide a word to search for vowels:")
# found = []
# for letter in word:
# if letter in vowels:
# if letter not in found:
# found.append(letter)
# for vowel in found:
# print(vowel)
#==================================================================
# vowels = ['a', 'o', 'i', 'e', 'u']
# word = input("Provide a word to search for vowels:")
# found = set(word)
# intersec = found.intersection(vowels)
# print(intersec)
#==================================================================
# Even shorter
vowels = set('aoieu')
word = input("Provide a word to search for vowels:")
found = vowels.intersection(set(word))
print(found)
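# For example, entering "housing" prints a set such as {'o', 'u', 'i'}
# (set ordering is arbitrary, so the displayed order may vary).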
|