| repo_name (string, 6–100 chars) | path (string, 4–294 chars) | copies (string, 1–5 chars) | size (string, 4–6 chars) | content (string, 606–896k chars) | license (string, 15 classes) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5–99.8) | line_max (int64, 13–999) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
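For orientation, a minimal sketch of filtering rows with this schema after loading them into pandas; the file name and storage format below are assumptions, not part of the dump:

import pandas as pd

# Assumption: the rows have been exported to a Parquet file with the columns above.
df = pd.read_parquet("code_dump.parquet")

# Keep human-written files with moderate line lengths and letter density.
mask = (~df["autogenerated"]) & (df["line_max"] < 1000) & df["alpha_frac"].between(0.25, 0.97)
for _, row in df[mask].iterrows():
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))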
victor-rene/kivy-gamelib
|
stickman/bone.py
|
1
|
2072
|
import math
from kivy.graphics.context_instructions import PopMatrix, PushMatrix, Rotate
from kivy.properties import NumericProperty
from kivy.uix.image import Image


class Bone(Image):

    angle = NumericProperty()

    def __init__(self, **kw):
        super(Bone, self).__init__(**kw)
        self.name = kw['name'] if 'name' in kw else None
        self.allow_stretch = True
        self.keep_ratio = False
        self.source = 'img/bone.png'
        self.next = []
        self.prev = None
        self.head = None
        self.tip = None
        self.bone_length = 0
        self.radius = None
        with self.canvas.before:
            PushMatrix()
            self.rotation = Rotate()
        with self.canvas.after:
            PopMatrix()
        self.bind(pos=self.update, size=self.update, angle=self.rotate)

    def attach(self, bone):
        bone.prev = self
        self.next.append(bone)

    def attach_all(self, bones):
        for bone in bones:
            self.attach(bone)

    def rotate(self, *args):
        if self.prev:
            self.rotation.angle = self.prev.rotation.angle + self.angle
        else:
            self.rotation.angle = self.angle
        self.tip = self.get_tip_pos()
        for bone in self.next:
            self.coerce(bone)

    def update(self, *args):
        self.radius = self.width / 2
        self.bone_length = self.height - self.radius * 2  # approximate for head / tip radii
        self.head = self.x + self.radius, self.top - self.radius
        self.tip = self.get_tip_pos()
        # print 'head', self.head, self.prev, self.pos
        self.rotation.origin = self.head
        for bone in self.next:
            self.coerce(bone)

    def get_tip_pos(self):
        a = (self.rotation.angle - 90) * math.pi / 180
        dx = math.cos(a) * self.bone_length
        dy = math.sin(a) * self.bone_length
        return self.x + self.radius + dx, self.top - self.radius + dy

    def set_head_pos(self, pos):
        radius = self.width / 2
        head_x, head_y = pos
        self.pos = head_x - radius, head_y - radius - self.bone_length

    def coerce(self, bone):
        # print 'tip', self.get_tip_pos(), self.prev, self.pos
        bone.set_head_pos(self.tip)
        bone.rotate()
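A short usage sketch (hypothetical, not part of the stored file): it assumes a running Kivy application supplies widget sizing, and the bone names and sizes below are made up for illustration.

# Hypothetical example: chain two bones and drive them by angle.
upper_arm = Bone(name='upper_arm', size=(20, 120), pos=(100, 300))
forearm = Bone(name='forearm', size=(20, 100))
upper_arm.attach(forearm)   # sets forearm.prev and registers it in upper_arm.next
upper_arm.update()          # recomputes head/tip and repositions attached bones
upper_arm.angle = 30        # NumericProperty change triggers rotate() via bind()
forearm.angle = -45         # a child's angle is applied relative to its parent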
|
mit
| 6,047,223,673,012,230,000 | -7,913,459,226,661,366,000 | 28.197183 | 87 | 0.629826 | false |
mykytamorachov/outpost
|
flask/lib/python2.7/site-packages/werkzeug/testsuite/formparser.py
|
97
|
18740
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.formparser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the form parsing facilities.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from os.path import join, dirname
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import formparser
from werkzeug.test import create_environ, Client
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import RequestEntityTooLarge
from werkzeug.datastructures import MultiDict
from werkzeug.formparser import parse_form_data
from werkzeug._compat import BytesIO
@Request.application
def form_data_consumer(request):
result_object = request.args['object']
if result_object == 'text':
return Response(repr(request.form['text']))
f = request.files[result_object]
return Response(b'\n'.join((
repr(f.filename).encode('ascii'),
repr(f.name).encode('ascii'),
repr(f.content_type).encode('ascii'),
f.stream.read()
)))
def get_contents(filename):
with open(filename, 'rb') as f:
return f.read()
class FormParserTestCase(WerkzeugTestCase):
def test_limiting(self):
data = b'foo=Hello+World&bar=baz'
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_content_length = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
data = (b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\n'
b'Hello World\r\n'
b'--foo\r\nContent-Disposition: form-field; name=bar\r\n\r\n'
b'bar=baz\r\n--foo--')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 4
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
def test_parse_form_data_put_without_content(self):
# A PUT without a Content-Type header returns empty data
# Both rfc1945 and rfc2616 (1.0 and 1.1) say "Any HTTP/[1.0/1.1] message
# containing an entity-body SHOULD include a Content-Type header field
# defining the media type of that body." In the case where either
# header is omitted, parse_form_data should still work.
env = create_environ('/foo', 'http://example.org/', method='PUT')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_strict_equal(stream.read(), b'')
self.assert_strict_equal(len(form), 0)
self.assert_strict_equal(len(files), 0)
def test_parse_form_data_get_without_content(self):
env = create_environ('/foo', 'http://example.org/', method='GET')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_strict_equal(stream.read(), b'')
self.assert_strict_equal(len(form), 0)
self.assert_strict_equal(len(files), 0)
def test_large_file(self):
data = b'x' * (1024 * 600)
req = Request.from_values(data={'foo': (BytesIO(data), 'test.txt')},
method='POST')
# make sure we have a real file here, because we expect to be
# on the disk. > 1024 * 500
self.assert_true(hasattr(req.files['foo'].stream, u'fileno'))
# close file to prevent fds from leaking
req.files['foo'].close()
def test_streaming_parse(self):
data = b'x' * (1024 * 600)
class StreamMPP(formparser.MultiPartParser):
def parse(self, file, boundary, content_length):
i = iter(self.parse_lines(file, boundary, content_length))
one = next(i)
two = next(i)
return self.cls(()), {'one': one, 'two': two}
class StreamFDP(formparser.FormDataParser):
def _sf_parse_multipart(self, stream, mimetype,
content_length, options):
form, files = StreamMPP(
self.stream_factory, self.charset, self.errors,
max_form_memory_size=self.max_form_memory_size,
cls=self.cls).parse(stream, options.get('boundary').encode('ascii'),
content_length)
return stream, form, files
parse_functions = {}
parse_functions.update(formparser.FormDataParser.parse_functions)
parse_functions['multipart/form-data'] = _sf_parse_multipart
class StreamReq(Request):
form_data_parser_class = StreamFDP
req = StreamReq.from_values(data={'foo': (BytesIO(data), 'test.txt')},
method='POST')
self.assert_strict_equal('begin_file', req.files['one'][0])
self.assert_strict_equal(('foo', 'test.txt'), req.files['one'][1][1:])
self.assert_strict_equal('cont', req.files['two'][0])
self.assert_strict_equal(data, req.files['two'][1])
class MultiPartTestCase(WerkzeugTestCase):
def test_basic(self):
resources = join(dirname(__file__), 'multipart')
client = Client(form_data_consumer, Response)
repository = [
('firefox3-2png1txt', '---------------------------186454651713519341951581030105', [
(u'anchor.png', 'file1', 'image/png', 'file1.png'),
(u'application_edit.png', 'file2', 'image/png', 'file2.png')
], u'example text'),
('firefox3-2pnglongtext', '---------------------------14904044739787191031754711748', [
(u'accept.png', 'file1', 'image/png', 'file1.png'),
(u'add.png', 'file2', 'image/png', 'file2.png')
], u'--long text\r\n--with boundary\r\n--lookalikes--'),
('opera8-2png1txt', '----------zEO9jQKmLc2Cq88c23Dx19', [
(u'arrow_branch.png', 'file1', 'image/png', 'file1.png'),
(u'award_star_bronze_1.png', 'file2', 'image/png', 'file2.png')
], u'blafasel öäü'),
('webkit3-2png1txt', '----WebKitFormBoundaryjdSFhcARk8fyGNy6', [
(u'gtk-apply.png', 'file1', 'image/png', 'file1.png'),
(u'gtk-no.png', 'file2', 'image/png', 'file2.png')
], u'this is another text with ümläüts'),
('ie6-2png1txt', '---------------------------7d91b03a20128', [
(u'file1.png', 'file1', 'image/x-png', 'file1.png'),
(u'file2.png', 'file2', 'image/x-png', 'file2.png')
], u'ie6 sucks :-/')
]
for name, boundary, files, text in repository:
folder = join(resources, name)
data = get_contents(join(folder, 'request.txt'))
for filename, field, content_type, fsname in files:
response = client.post('/?object=' + field, data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
lines = response.get_data().split(b'\n', 3)
self.assert_strict_equal(lines[0], repr(filename).encode('ascii'))
self.assert_strict_equal(lines[1], repr(field).encode('ascii'))
self.assert_strict_equal(lines[2], repr(content_type).encode('ascii'))
self.assert_strict_equal(lines[3], get_contents(join(folder, fsname)))
response = client.post('/?object=text', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
self.assert_strict_equal(response.get_data(), repr(text).encode('utf-8'))
def test_ie7_unc_path(self):
client = Client(form_data_consumer, Response)
data_file = join(dirname(__file__), 'multipart', 'ie7_full_path_request.txt')
data = get_contents(data_file)
boundary = '---------------------------7da36d1b4a0164'
response = client.post('/?object=cb_file_upload_multiple', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary, content_length=len(data))
lines = response.get_data().split(b'\n', 3)
self.assert_strict_equal(lines[0],
repr(u'Sellersburg Town Council Meeting 02-22-2010doc.doc').encode('ascii'))
def test_end_of_file(self):
# This test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
b'Content-Type: text/plain\r\n\r\n'
b'file contents and no end'
)
data = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_true(not data.files)
self.assert_true(not data.form)
def test_broken(self):
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
'Content-Transfer-Encoding: base64\r\n'
'Content-Type: text/plain\r\n\r\n'
'broken base 64'
'--foo--'
)
_, form, files = formparser.parse_form_data(create_environ(data=data,
method='POST', content_type='multipart/form-data; boundary=foo'))
self.assert_true(not files)
self.assert_true(not form)
self.assert_raises(ValueError, formparser.parse_form_data,
create_environ(data=data, method='POST',
content_type='multipart/form-data; boundary=foo'),
silent=False)
def test_file_no_content_type(self):
data = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n\r\n'
b'file contents\r\n--foo--'
)
data = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_equal(data.files['test'].filename, 'test.txt')
self.assert_strict_equal(data.files['test'].read(), b'file contents')
def test_extra_newline(self):
# this test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
b'\r\n\r\n--foo\r\n'
b'Content-Disposition: form-data; name="foo"\r\n\r\n'
b'a string\r\n'
b'--foo--'
)
data = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_true(not data.files)
self.assert_strict_equal(data.form['foo'], u'a string')
def test_headers(self):
data = (b'--foo\r\n'
b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
b'X-Custom-Header: blah\r\n'
b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
b'file contents, just the contents\r\n'
b'--foo--')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
foo = req.files['foo']
self.assert_strict_equal(foo.mimetype, 'text/plain')
self.assert_strict_equal(foo.mimetype_params, {'charset': 'utf-8'})
self.assert_strict_equal(foo.headers['content-type'], foo.content_type)
self.assert_strict_equal(foo.content_type, 'text/plain; charset=utf-8')
self.assert_strict_equal(foo.headers['x-custom-header'], 'blah')
def test_nonstandard_line_endings(self):
for nl in b'\n', b'\r', b'\r\n':
data = nl.join((
b'--foo',
b'Content-Disposition: form-data; name=foo',
b'',
b'this is just bar',
b'--foo',
b'Content-Disposition: form-data; name=bar',
b'',
b'blafasel',
b'--foo--'
))
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; '
'boundary=foo', method='POST')
self.assert_strict_equal(req.form['foo'], u'this is just bar')
self.assert_strict_equal(req.form['bar'], u'blafasel')
def test_failures(self):
def parse_multipart(stream, boundary, content_length):
parser = formparser.MultiPartParser(content_length)
return parser.parse(stream, boundary, content_length)
self.assert_raises(ValueError, parse_multipart, BytesIO(), b'broken ', 0)
data = b'--foo\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))
data = b'--foo\r\nContent-Disposition: form-field; name=foo\r\n' \
b'Content-Transfer-Encoding: base64\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))
data = b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\nHello World\r\n'
self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))
x = formparser.parse_multipart_headers(['foo: bar\r\n', ' x test\r\n'])
self.assert_strict_equal(x['foo'], 'bar\n x test')
self.assert_raises(ValueError, formparser.parse_multipart_headers,
['foo: bar\r\n', ' x test'])
def test_bad_newline_bad_newline_assumption(self):
class ISORequest(Request):
charset = 'latin1'
contents = b'U2vlbmUgbORu'
data = b'--foo\r\nContent-Disposition: form-data; name="test"\r\n' \
b'Content-Transfer-Encoding: base64\r\n\r\n' + \
contents + b'\r\n--foo--'
req = ISORequest.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_strict_equal(req.form['test'], u'Sk\xe5ne l\xe4n')
def test_empty_multipart(self):
environ = {}
data = b'--boundary--'
environ['REQUEST_METHOD'] = 'POST'
environ['CONTENT_TYPE'] = 'multipart/form-data; boundary=boundary'
environ['CONTENT_LENGTH'] = str(len(data))
environ['wsgi.input'] = BytesIO(data)
stream, form, files = parse_form_data(environ, silent=False)
rv = stream.read()
self.assert_equal(rv, b'')
self.assert_equal(form, MultiDict())
self.assert_equal(files, MultiDict())
class InternalFunctionsTestCase(WerkzeugTestCase):
def test_line_parser(self):
assert formparser._line_parse('foo') == ('foo', False)
assert formparser._line_parse('foo\r\n') == ('foo', True)
assert formparser._line_parse('foo\r') == ('foo', True)
assert formparser._line_parse('foo\n') == ('foo', True)
def test_find_terminator(self):
lineiter = iter(b'\n\n\nfoo\nbar\nbaz'.splitlines(True))
find_terminator = formparser.MultiPartParser()._find_terminator
line = find_terminator(lineiter)
self.assert_equal(line, b'foo')
self.assert_equal(list(lineiter), [b'bar\n', b'baz'])
self.assert_equal(find_terminator([]), b'')
self.assert_equal(find_terminator([b'']), b'')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FormParserTestCase))
suite.addTest(unittest.makeSuite(MultiPartTestCase))
suite.addTest(unittest.makeSuite(InternalFunctionsTestCase))
return suite
|
gpl-2.0
| -3,099,239,915,268,090,400 | 8,680,288,824,132,549,000 | 45.835 | 109 | 0.550069 | false |
joshzarrabi/e-mission-server
|
emission/storage/decorations/useful_queries.py
|
2
|
3911
|
# Standard imports
from datetime import datetime, timedelta
import logging

# Our imports
from emission.core.get_database import get_section_db
import emission.core.wrapper.trip_old as rt


def get_all_sections(section_id):
    """ Return all sections in the trip that the specified section is a part of
        For example, if this is the section to go to the train station, return all
        sections for the same trip.
        The input is the _id field of the section
    """
    section = rt.Section.section_from_json(get_section_db().find_one({'_id': section_id}))
    allSections = get_section_db().find({"trip_id": section.trip_id})
    return list(allSections)


def get_all_sections_for_user_day(user, year, month, day):
    """ Return all sections for the specified user on the specified day,
        i.e. all sections whose start and end datetimes fall between that day's
        midnight and the next day's midnight.
    """
    dayMidnight = datetime(year, month, day, 0, 0, 0)
    nextDayMidnight = dayMidnight + timedelta(days=1)
    sectionIt = get_section_db().find({'user_id': user,
                                       "section_start_datetime": {"$gt": dayMidnight},
                                       "section_end_datetime": {"$lt": nextDayMidnight}})
    return [rt.Section.section_from_json(s) for s in sectionIt]


def get_trip_before(section_id):
    """ Return the trip just before the one that this section belongs to.
    """
    section = rt.Section.section_from_json(get_section_db().find_one({'_id': section_id}))
    logging.debug("Found section %s" % section)
    firstSection = rt.Section.section_from_json(get_section_db().find_one({"trip_id": section.trip_id, "section_id": 0}))
    logging.debug("First section %s" % firstSection)
    # First, try to find the section assuming that data collection was continuous
    prevPlace = rt.Section.section_from_json(get_section_db().find_one({"section_end_datetime": firstSection.start_time}))
    logging.debug("prevPlace %s" % prevPlace)
    # This should be the "place" trip
    if prevPlace is not None:
        logging.debug("prevPlace.section_type = %s" % prevPlace.section_type)
        if prevPlace.section_type != "place":
            return None
        else:
            prevTrip = get_section_db().find_one({"section_end_datetime": prevPlace.start_time})
            return prevTrip
    else:
        assert(False)
        return allSections


def get_bounds(sectionList):
    # Lat and lng are going to be in the range of -180 to 180.
    # So let's pick large positive and negative numbers to initialize them
    min_lat = 999999
    min_lon = 999999
    max_lat = -9999999
    max_lon = -9999999
    for sectionJSON in sectionList:
        section = rt.Section.section_from_json(sectionJSON)
        logging.debug("Testing start point %s " % section.section_start_location)
        if section.section_start_location.lat < min_lat:
            min_lat = section.section_start_location.lat
        if section.section_start_location.lon < min_lon:
            min_lon = section.section_start_location.lon
        logging.debug("Testing end point %s " % section.section_end_location)
        if section.section_end_location.lat > max_lat:
            max_lat = section.section_end_location.lat
        if section.section_end_location.lon > max_lon:
            max_lon = section.section_end_location.lon
    return (rt.Coordinate(min_lat, min_lon), rt.Coordinate(max_lat, max_lon))


def get_center_for_section(sectionJSON):
    """
    Returns a tuple (lat, lon) that can be passed in to pygmaps to create a map
    centered at the correct location
    """
    return ((sectionJSON["section_start_point"]["coordinates"][1] +
             sectionJSON["section_end_point"]["coordinates"][1]) / 2,
            (sectionJSON["section_start_point"]["coordinates"][0] +
             sectionJSON["section_end_point"]["coordinates"][0]) / 2)
|
bsd-3-clause
| -235,231,425,643,743,200 | 4,522,297,048,256,654,300 | 45.559524 | 122 | 0.668371 | false |
pombredanne/MOG
|
nova/tests/virt/test_virt.py
|
5
|
5363
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import test
from nova import utils
from nova.virt.disk import api as disk_api
from nova.virt import driver
class TestVirtDriver(test.NoDBTestCase):
def test_block_device(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
empty_block_device_info = {}
self.assertEqual(
driver.block_device_info_get_root(block_device_info), '/dev/sda')
self.assertEqual(
driver.block_device_info_get_root(empty_block_device_info), None)
self.assertEqual(
driver.block_device_info_get_root(None), None)
self.assertEqual(
driver.block_device_info_get_swap(block_device_info), swap)
self.assertEqual(driver.block_device_info_get_swap(
empty_block_device_info)['device_name'], None)
self.assertEqual(driver.block_device_info_get_swap(
empty_block_device_info)['swap_size'], 0)
self.assertEqual(
driver.block_device_info_get_swap({'swap': None})['device_name'],
None)
self.assertEqual(
driver.block_device_info_get_swap({'swap': None})['swap_size'],
0)
self.assertEqual(
driver.block_device_info_get_swap(None)['device_name'], None)
self.assertEqual(
driver.block_device_info_get_swap(None)['swap_size'], 0)
self.assertEqual(
driver.block_device_info_get_ephemerals(block_device_info),
ephemerals)
self.assertEqual(
driver.block_device_info_get_ephemerals(empty_block_device_info),
[])
self.assertEqual(
driver.block_device_info_get_ephemerals(None),
[])
def test_swap_is_usable(self):
self.assertFalse(driver.swap_is_usable(None))
self.assertFalse(driver.swap_is_usable({'device_name': None}))
self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 0}))
self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 1}))
class TestVirtDisk(test.NoDBTestCase):
def setUp(self):
super(TestVirtDisk, self).setUp()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def test_lxc_teardown_container(self):
def proc_mounts(self, mount_point):
mount_points = {
'/mnt/loop/nopart': '/dev/loop0',
'/mnt/loop/part': '/dev/mapper/loop0p1',
'/mnt/nbd/nopart': '/dev/nbd15',
'/mnt/nbd/part': '/dev/mapper/nbd15p1',
}
return mount_points[mount_point]
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
expected_commands = []
disk_api.teardown_container('/mnt/loop/nopart')
expected_commands += [
('umount', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/loop/part')
expected_commands += [
('umount', '/dev/mapper/loop0p1'),
('kpartx', '-d', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/nbd/nopart')
expected_commands += [
('umount', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
disk_api.teardown_container('/mnt/nbd/part')
expected_commands += [
('umount', '/dev/mapper/nbd15p1'),
('kpartx', '-d', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
self.assertEqual(self.executes, expected_commands)
|
apache-2.0
| 6,966,753,871,466,692,000 | -3,661,630,127,135,349,000 | 37.582734 | 78 | 0.532911 | false |
McHatters/HippieStation13
|
bot/C_rtd.py
|
67
|
3045
|
import random
def rtd(data,debug,sender):
backo = data
try:
arg1,arg2 = backo.split("d")
except ValueError, err:
return("Too many or too small amount of arguments")
else:
if debug:
print sender+":!rtd "+arg1+"d"+arg2 #faster than using %s's
die,die2 = [],[]
current_mark = ""
outcome = 0
realnumberfound = False
checks = []
count = 0
arg1 = arg1.replace(" ","")
arg2 = arg2.replace(" ","")
try:
i_arg1 = int(arg1)
a_arg1 = abs(i_arg1)
if "+" in arg2 or "-" in arg2:
plus_spot = arg2.find("+")
minus_spot = arg2.find("-")
if plus_spot == -1 and minus_spot == -1:
nicer_form = ""
elif plus_spot != -1 and minus_spot == -1:
nicer_form = arg2[plus_spot:]
elif plus_spot == -1 and minus_spot != -1:
nicer_form = arg2[minus_spot:]
else:
if plus_spot < minus_spot:
nicer_form = arg2[plus_spot:]
else:
nicer_form = arg2[minus_spot:]
for letter in arg2:
if letter == "+" or letter == "-":
current_mark = letter
checks = []
count += 1
continue
checks.append(letter)
try:
next_up = arg2[count+1]
except:
if realnumberfound == False:
i_arg2 = int("".join(checks))
checks = []
realnumberfound = True
elif current_mark == "+":
outcome += int("".join(checks))
else:
outcome -= int("".join(checks))
else:
if next_up == "+" or next_up == "-":
if realnumberfound == False:
i_arg2 = int("".join(checks))
checks = []
realnumberfound = True
else:
if current_mark == "+":
outcome += int("".join(checks))
else:
outcome -= int("".join(checks))
checks = []
count += 1
else:
i_arg2 = int(arg2)
if a_arg1 == 0 or abs(i_arg2) == 0:
raise RuntimeError
except ValueError:
return("You lied! That's not a number!")
except RuntimeError:
return("Too many zeroes!")
else:
if a_arg1 > 100:
return("Too many rolls, I can only do one hundred at max.")
else:
for i in xrange(0,a_arg1):
if i_arg2 < 0:
dice = random.randint(i_arg2,0)
else:
dice = random.randint(1,i_arg2)
die.append(dice)
die2.append(str(dice))
if i_arg2 < 0:
flist = "".join(die2)
else:
flist = "+".join(die2)
if len(flist) > 350:
return(str(reduce(lambda x,y: x+y, die)+outcome))
else:
if current_mark == "":
return(flist+" = "+str(reduce(lambda x,y: x+y, die)+outcome))
else:
return(flist+" ("+nicer_form+") = "+str(reduce(lambda x,y: x+y, die)+outcome))
|
agpl-3.0
| 5,479,295,050,246,494,000 | 824,029,418,471,296,600 | 30.71875 | 90 | 0.468309 | false |
kostajaitachi/shogun
|
tests/integration/python_modular/test_one.py
|
21
|
2671
|
#!/usr/bin/env python
"""
Test one data file
"""
from numpy import *
import sys
import kernel
import distance
import classifier
import clustering
import distribution
import regression
import preprocessor
from modshogun import Math_init_random
SUPPORTED=['kernel', 'distance', 'classifier', 'clustering', 'distribution',
'regression', 'preprocessor']
def _get_name_fun (fnam):
module=None
for supported in SUPPORTED:
if fnam.find(supported)>-1:
module=supported
break
if module is None:
print('Module required for %s not supported yet!' % fnam)
return None
return module+'.test'
def _test_mfile (fnam):
try:
mfile=open(fnam, mode='r')
except IOError as e:
print(e)
return False
indata={}
name_fun=_get_name_fun(fnam)
if name_fun is None:
return False
for line in mfile:
line=line.strip(" \t\n;")
param = line.split('=')[0].strip()
if param=='name':
name=line.split('=')[1].strip().split("'")[1]
indata[param]=name
elif param=='kernel_symdata' or param=='kernel_data':
indata[param]=_read_matrix(line)
elif param.startswith('kernel_matrix') or \
param.startswith('distance_matrix'):
indata[param]=_read_matrix(line)
elif param.find('data_train')>-1 or param.find('data_test')>-1:
# data_{train,test} might be prepended by 'subkernelX_'
indata[param]=_read_matrix(line)
elif param=='classifier_alphas' or param=='classifier_support_vectors':
try:
indata[param]=eval(line.split('=')[1])
except SyntaxError: # might be MultiClass SVM and hence matrix
indata[param]=_read_matrix(line)
elif param=='clustering_centers' or param=='clustering_pairs':
indata[param]=_read_matrix(line)
else:
if (line.find("'")==-1):
indata[param]=eval(line.split('=')[1])
else:
indata[param]=line.split('=')[1].strip().split("'")[1]
mfile.close()
fun=eval(name_fun)
# seed random to constant value used at data file's creation
Math_init_random(indata['init_random'])
random.seed(indata['init_random'])
return fun(indata)
def _read_matrix (line):
try:
str_line=(line.split('[')[1]).split(']')[0]
except IndexError:
str_line=(line.split('{')[1]).split('}')[0]
lines=str_line.split(';')
lis2d=list()
for x in lines:
lis=list()
for y in x.split(','):
y=y.replace("'","").strip()
if(y.isalpha()):
lis.append(y)
else:
if y.find('.')!=-1:
lis.append(float(y))
else:
try:
lis.append(int(y))
except ValueError: # not int, RAWDNA?
lis.append(y)
lis2d.append(lis)
return array(lis2d)
for filename in sys.argv:
if (filename.endswith('.m')):
res=_test_mfile(filename)
if res:
sys.exit(0)
else:
sys.exit(1)
|
gpl-3.0
| -5,560,419,125,044,753,000 | 3,406,399,680,260,783,600 | 21.445378 | 76 | 0.655934 | false |
Cyrillic327/p2pool
|
p2pool/test/test_node.py
|
198
|
10503
|
from __future__ import division
import base64
import random
import tempfile
from twisted.internet import defer, reactor
from twisted.python import failure
from twisted.trial import unittest
from twisted.web import client, resource, server
from p2pool import data, node, work
from p2pool.bitcoin import data as bitcoin_data, networks, worker_interface
from p2pool.util import deferral, jsonrpc, math, variable
class bitcoind(object): # can be used as p2p factory, p2p protocol, or rpc jsonrpc proxy
def __init__(self):
self.blocks = [0x000000000000016c169477c25421250ec5d32cf9c6d38538b5de970a2355fd89]
self.headers = {0x16c169477c25421250ec5d32cf9c6d38538b5de970a2355fd89: {
'nonce': 1853158954,
'timestamp': 1351658517,
'merkle_root': 2282849479936278423916707524932131168473430114569971665822757638339486597658L,
'version': 1,
'previous_block': 1048610514577342396345362905164852351970507722694242579238530L,
'bits': bitcoin_data.FloatingInteger(bits=0x1a0513c5, target=0x513c50000000000000000000000000000000000000000000000L),
}}
self.conn = variable.Variable(self)
self.new_headers = variable.Event()
self.new_block = variable.Event()
self.new_tx = variable.Event()
# p2p factory
def getProtocol(self):
return self
# p2p protocol
def send_block(self, block):
pass
def send_tx(self, tx):
pass
def get_block_header(self, block_hash):
return self.headers[block_hash]
# rpc jsonrpc proxy
def rpc_help(self):
return '\ngetblock '
def rpc_getblock(self, block_hash_hex):
block_hash = int(block_hash_hex, 16)
return dict(height=self.blocks.index(block_hash))
def __getattr__(self, name):
if name.startswith('rpc_'):
return lambda *args, **kwargs: failure.Failure(jsonrpc.Error_for_code(-32601)('Method not found'))
def rpc_getblocktemplate(self, param):
if param['mode'] == 'template':
pass
elif param['mode'] == 'submit':
result = param['data']
block = bitcoin_data.block_type.unpack(result.decode('hex'))
if sum(tx_out['value'] for tx_out in block['txs'][0]['tx_outs']) != sum(tx['tx_outs'][0]['value'] for tx in block['txs'][1:]) + 5000000000:
print 'invalid fee'
if block['header']['previous_block'] != self.blocks[-1]:
return False
if bitcoin_data.hash256(result.decode('hex')) > block['header']['bits'].target:
return False
header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(block['header']))
self.blocks.append(header_hash)
self.headers[header_hash] = block['header']
reactor.callLater(0, self.new_block.happened)
return True
else:
raise jsonrpc.Error_for_code(-1)('invalid request')
txs = []
for i in xrange(100):
fee = i
txs.append(dict(
data=bitcoin_data.tx_type.pack(dict(version=1, tx_ins=[], tx_outs=[dict(value=fee, script='hello!'*100)], lock_time=0)).encode('hex'),
fee=fee,
))
return {
"version" : 2,
"previousblockhash" : '%064x' % (self.blocks[-1],),
"transactions" : txs,
"coinbaseaux" : {
"flags" : "062f503253482f"
},
"coinbasevalue" : 5000000000 + sum(tx['fee'] for tx in txs),
"target" : "0000000000000513c50000000000000000000000000000000000000000000000",
"mintime" : 1351655621,
"mutable" : [
"time",
"transactions",
"prevblock"
],
"noncerange" : "00000000ffffffff",
"sigoplimit" : 20000,
"sizelimit" : 1000000,
"curtime" : 1351659940,
"bits" : "21008000",
"height" : len(self.blocks),
}
@apply
class mm_provider(object):
def __getattr__(self, name):
print '>>>>>>>', name
def rpc_getauxblock(self, request, result1=None, result2=None):
if result1 is not None:
print result1, result2
return True
return {
"target" : "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", # 2**256*2/3
"hash" : "2756ea0315d46dc3d8d974f34380873fc88863845ac01a658ef11bc3b368af52",
"chainid" : 1
}
mynet = math.Object(
NAME='mynet',
PARENT=networks.nets['litecoin_testnet'],
SHARE_PERIOD=5, # seconds
CHAIN_LENGTH=20*60//3, # shares
REAL_CHAIN_LENGTH=20*60//3, # shares
TARGET_LOOKBEHIND=200, # shares
SPREAD=3, # blocks
IDENTIFIER='cca5e24ec6408b1e'.decode('hex'),
PREFIX='ad9614f6466a39cf'.decode('hex'),
P2P_PORT=19338,
MIN_TARGET=2**256 - 1,
MAX_TARGET=2**256 - 1,
PERSIST=False,
WORKER_PORT=19327,
BOOTSTRAP_ADDRS='72.14.191.28'.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: True,
)
class MiniNode(object):
@classmethod
@defer.inlineCallbacks
def start(cls, net, factory, bitcoind, peer_ports, merged_urls):
self = cls()
self.n = node.Node(factory, bitcoind, [], [], net)
yield self.n.start()
self.n.p2p_node = node.P2PNode(self.n, port=0, max_incoming_conns=1000000, addr_store={}, connect_addrs=[('127.0.0.1', peer_port) for peer_port in peer_ports])
self.n.p2p_node.start()
wb = work.WorkerBridge(node=self.n, my_pubkey_hash=random.randrange(2**160), donation_percentage=random.uniform(0, 10), merged_urls=merged_urls, worker_fee=3)
self.wb = wb
web_root = resource.Resource()
worker_interface.WorkerInterface(wb).attach_to(web_root)
self.web_port = reactor.listenTCP(0, server.Site(web_root))
defer.returnValue(self)
@defer.inlineCallbacks
def stop(self):
yield self.web_port.stopListening()
yield self.n.p2p_node.stop()
yield self.n.stop()
del self.web_port, self.n
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_node(self):
bitd = bitcoind()
mm_root = resource.Resource()
mm_root.putChild('', jsonrpc.HTTPServer(mm_provider))
mm_port = reactor.listenTCP(0, server.Site(mm_root))
n = node.Node(bitd, bitd, [], [], mynet)
yield n.start()
wb = work.WorkerBridge(node=n, my_pubkey_hash=42, donation_percentage=2, merged_urls=[('http://127.0.0.1:%i' % (mm_port.getHost().port,), '')], worker_fee=3)
web_root = resource.Resource()
worker_interface.WorkerInterface(wb).attach_to(web_root)
port = reactor.listenTCP(0, server.Site(web_root))
proxy = jsonrpc.HTTPProxy('http://127.0.0.1:' + str(port.getHost().port),
headers=dict(Authorization='Basic ' + base64.b64encode('user/0:password')))
yield deferral.sleep(3)
for i in xrange(100):
blah = yield proxy.rpc_getwork()
yield proxy.rpc_getwork(blah['data'])
yield deferral.sleep(3)
assert len(n.tracker.items) == 100
assert n.tracker.verified.get_height(n.best_share_var.value) == 100
wb.stop()
n.stop()
yield port.stopListening()
del n, wb, web_root, port, proxy
import gc
gc.collect()
gc.collect()
gc.collect()
yield deferral.sleep(20) # waiting for work_poller to exit
yield mm_port.stopListening()
#test_node.timeout = 15
@defer.inlineCallbacks
def test_nodes(self):
N = 3
SHARES = 600
bitd = bitcoind()
nodes = []
for i in xrange(N):
nodes.append((yield MiniNode.start(mynet, bitd, bitd, [mn.n.p2p_node.serverfactory.listen_port.getHost().port for mn in nodes], [])))
yield deferral.sleep(3)
for i in xrange(SHARES):
proxy = jsonrpc.HTTPProxy('http://127.0.0.1:' + str(random.choice(nodes).web_port.getHost().port),
headers=dict(Authorization='Basic ' + base64.b64encode('user/0:password')))
blah = yield proxy.rpc_getwork()
yield proxy.rpc_getwork(blah['data'])
yield deferral.sleep(.05)
print i
print type(nodes[0].n.tracker.items[nodes[0].n.best_share_var.value])
# crawl web pages
from p2pool import web
stop_event = variable.Event()
web2_root = web.get_web_root(nodes[0].wb, tempfile.mkdtemp(), variable.Variable(None), stop_event)
web2_port = reactor.listenTCP(0, server.Site(web2_root))
for name in web2_root.listNames() + ['web/' + x for x in web2_root.getChildWithDefault('web', None).listNames()]:
if name in ['web/graph_data', 'web/share', 'web/share_data']: continue
print
print name
try:
res = yield client.getPage('http://127.0.0.1:%i/%s' % (web2_port.getHost().port, name))
except:
import traceback
traceback.print_exc()
else:
print repr(res)[:100]
print
yield web2_port.stopListening()
stop_event.happened()
del web2_root
yield deferral.sleep(3)
for i, n in enumerate(nodes):
assert len(n.n.tracker.items) == SHARES, (i, len(n.n.tracker.items))
assert n.n.tracker.verified.get_height(n.n.best_share_var.value) == SHARES, (i, n.n.tracker.verified.get_height(n.n.best_share_var.value))
assert type(n.n.tracker.items[nodes[0].n.best_share_var.value]) is (data.Share.SUCCESSOR if data.Share.SUCCESSOR is not None else data.Share)
assert type(n.n.tracker.items[n.n.tracker.get_nth_parent_hash(nodes[0].n.best_share_var.value, SHARES - 5)]) is data.Share
for n in nodes:
yield n.stop()
del nodes, n
import gc
gc.collect()
gc.collect()
gc.collect()
yield deferral.sleep(20) # waiting for work_poller to exit
test_nodes.timeout = 300
|
gpl-3.0
| 3,777,134,930,999,433,000 | 5,937,344,012,027,848,000 | 36.510714 | 167 | 0.58231 | false |
liberatorqjw/scikit-learn
|
sklearn/tree/export.py
|
30
|
4529
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Licence: BSD 3 clause
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def node_to_str(tree, node_id, criterion):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id, criterion)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
out_file.write("}")
finally:
if own_file:
out_file.close()
|
bsd-3-clause
| -8,822,395,776,446,858,000 | -9,077,986,720,388,140,000 | 33.052632 | 79 | 0.546037 | false |
esthermm/odoomrp-utils
|
stock_picking_customer_ref/__openerp__.py
|
12
|
1446
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c)
# 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# 2015 AvanzOsc (http://www.avanzosc.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': "Stock Picking Customer Ref",
    'version': "1.0",
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    'website': "http://www.odoomrp.com",
    'category': 'Warehouse Management',
    "depends": ["sale",
                "stock",
                "sale_stock"
                ],
    'data': ["views/stock_picking_view.xml"],
    "installable": True,
}
|
agpl-3.0
| 7,654,532,385,787,020,000 | -2,608,365,231,346,380,000 | 39.166667 | 79 | 0.567082 | false |
djmaze/phantomjs
|
src/qt/qtwebkit/Tools/QueueStatusServer/handlers/patchstatus.py
|
146
|
1974
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class PatchStatus(webapp.RequestHandler):
    def get(self, queue_name, attachment_id):
        statuses = QueueStatus.all().filter('queue_name =', queue_name).filter('active_patch_id =', int(attachment_id)).order('-date').fetch(1)
        if not statuses:
            self.error(404)
            return
        self.response.out.write(statuses[0].message)
|
bsd-3-clause
| -1,107,344,661,106,378,400 | -8,585,715,638,965,836,000 | 48.35 | 143 | 0.758359 | false |
zsjohny/jumpserver
|
apps/assets/serializers/asset.py
|
1
|
6406
|
# -*- coding: utf-8 -*-
#
from rest_framework import serializers
from django.db.models import Prefetch, F
from django.utils.translation import ugettext_lazy as _
from orgs.mixins.serializers import BulkOrgResourceModelSerializer
from common.serializers import AdaptedBulkListSerializer
from ..models import Asset, Node, Label, Platform
from .base import ConnectivitySerializer
__all__ = [
'AssetSerializer', 'AssetSimpleSerializer',
'AssetDisplaySerializer',
'ProtocolsField', 'PlatformSerializer',
'AssetDetailSerializer', 'AssetTaskSerializer',
]
class ProtocolField(serializers.RegexField):
protocols = '|'.join(dict(Asset.PROTOCOL_CHOICES).keys())
default_error_messages = {
'invalid': _('Protocol format should {}/{}'.format(protocols, '1-65535'))
}
regex = r'^(%s)/(\d{1,5})$' % protocols
def __init__(self, *args, **kwargs):
super().__init__(self.regex, **kwargs)
def validate_duplicate_protocols(values):
errors = []
names = []
for value in values:
if not value or '/' not in value:
continue
name = value.split('/')[0]
if name in names:
errors.append(_("Protocol duplicate: {}").format(name))
names.append(name)
errors.append('')
if any(errors):
raise serializers.ValidationError(errors)
class ProtocolsField(serializers.ListField):
default_validators = [validate_duplicate_protocols]
def __init__(self, *args, **kwargs):
kwargs['child'] = ProtocolField()
kwargs['allow_null'] = True
kwargs['allow_empty'] = True
kwargs['min_length'] = 1
kwargs['max_length'] = 4
super().__init__(*args, **kwargs)
def to_representation(self, value):
if not value:
return []
return value.split(' ')
class AssetSerializer(BulkOrgResourceModelSerializer):
platform = serializers.SlugRelatedField(
slug_field='name', queryset=Platform.objects.all(), label=_("Platform")
)
protocols = ProtocolsField(label=_('Protocols'), required=False)
"""
Asset data structure
"""
class Meta:
model = Asset
list_serializer_class = AdaptedBulkListSerializer
fields = [
'id', 'ip', 'hostname', 'protocol', 'port',
'protocols', 'platform', 'is_active', 'public_ip', 'domain',
'admin_user', 'nodes', 'labels', 'number', 'vendor', 'model', 'sn',
'cpu_model', 'cpu_count', 'cpu_cores', 'cpu_vcpus', 'memory',
'disk_total', 'disk_info', 'os', 'os_version', 'os_arch',
'hostname_raw', 'comment', 'created_by', 'date_created',
'hardware_info',
]
read_only_fields = (
'vendor', 'model', 'sn', 'cpu_model', 'cpu_count',
'cpu_cores', 'cpu_vcpus', 'memory', 'disk_total', 'disk_info',
'os', 'os_version', 'os_arch', 'hostname_raw',
'created_by', 'date_created',
)
extra_kwargs = {
'protocol': {'write_only': True},
'port': {'write_only': True},
'hardware_info': {'label': _('Hardware info')},
'org_name': {'label': _('Org name')}
}
@classmethod
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset.prefetch_related(
Prefetch('nodes', queryset=Node.objects.all().only('id')),
Prefetch('labels', queryset=Label.objects.all().only('id')),
).select_related('admin_user', 'domain', 'platform') \
.annotate(platform_base=F('platform__base'))
return queryset
def compatible_with_old_protocol(self, validated_data):
protocols_data = validated_data.pop("protocols", [])
# compatibility with the old API
name = validated_data.get("protocol")
port = validated_data.get("port")
if not protocols_data and name and port:
protocols_data.insert(0, '/'.join([name, str(port)]))
elif not name and not port and protocols_data:
protocol = protocols_data[0].split('/')
validated_data["protocol"] = protocol[0]
validated_data["port"] = int(protocol[1])
if protocols_data:
validated_data["protocols"] = ' '.join(protocols_data)
def create(self, validated_data):
self.compatible_with_old_protocol(validated_data)
instance = super().create(validated_data)
return instance
def update(self, instance, validated_data):
self.compatible_with_old_protocol(validated_data)
return super().update(instance, validated_data)
class AssetDisplaySerializer(AssetSerializer):
connectivity = ConnectivitySerializer(read_only=True, label=_("Connectivity"))
class Meta(AssetSerializer.Meta):
fields = [
'id', 'ip', 'hostname', 'protocol', 'port',
'protocols', 'is_active', 'public_ip',
'number', 'vendor', 'model', 'sn',
'cpu_model', 'cpu_count', 'cpu_cores', 'cpu_vcpus', 'memory',
'disk_total', 'disk_info', 'os', 'os_version', 'os_arch',
'hostname_raw', 'comment', 'created_by', 'date_created',
'hardware_info', 'connectivity',
]
@classmethod
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset\
.annotate(admin_user_username=F('admin_user__username'))
return queryset
class PlatformSerializer(serializers.ModelSerializer):
meta = serializers.DictField(required=False, allow_null=True)
class Meta:
model = Platform
fields = [
'id', 'name', 'base', 'charset',
'internal', 'meta', 'comment'
]
class AssetDetailSerializer(AssetSerializer):
platform = PlatformSerializer(read_only=True)
class AssetSimpleSerializer(serializers.ModelSerializer):
connectivity = ConnectivitySerializer(read_only=True, label=_("Connectivity"))
class Meta:
model = Asset
fields = ['id', 'hostname', 'ip', 'connectivity', 'port']
class AssetTaskSerializer(serializers.Serializer):
ACTION_CHOICES = (
('refresh', 'refresh'),
('test', 'test'),
)
task = serializers.CharField(read_only=True)
action = serializers.ChoiceField(choices=ACTION_CHOICES, write_only=True)
|
gpl-2.0
| 5,383,003,637,001,271,000 | 9,146,585,496,651,882,000 | 33.695652 | 82 | 0.602287 | false |
Lloir/pc-kernel
|
scripts/gdb/linux/dmesg.py
|
367
|
2005
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import string
from linux import utils
class LxDmesg(gdb.Command):
    """Print Linux kernel log buffer."""

    def __init__(self):
        super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))

        inf = gdb.inferiors()[0]
        start = log_buf_addr + log_first_idx
        if log_first_idx < log_next_idx:
            log_buf_2nd_half = -1
            length = log_next_idx - log_first_idx
            log_buf = inf.read_memory(start, length)
        else:
            log_buf_2nd_half = log_buf_len - log_first_idx
            log_buf = inf.read_memory(start, log_buf_2nd_half) + \
                inf.read_memory(log_buf_addr, log_next_idx)

        pos = 0
        while pos < log_buf.__len__():
            length = utils.read_u16(log_buf[pos + 8:pos + 10])
            if length == 0:
                if log_buf_2nd_half == -1:
                    gdb.write("Corrupted log buffer!\n")
                    break
                pos = log_buf_2nd_half
                continue

            text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
            text = log_buf[pos + 16:pos + 16 + text_len]
            time_stamp = utils.read_u64(log_buf[pos:pos + 8])

            for line in memoryview(text).tobytes().splitlines():
                gdb.write("[{time:12.6f}] {line}\n".format(
                    time=time_stamp / 1000000000.0,
                    line=line))

            pos += length


LxDmesg()
|
gpl-2.0
| 7,558,998,160,382,962,000 | -6,797,728,751,857,098,000 | 29.846154 | 77 | 0.545636 | false |
darkleons/BE
|
addons/hr_timesheet_sheet/hr_timesheet_sheet.py
|
35
|
34024
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = "mail.thread"
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description="Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = {}
for sheet in self.browse(cr, uid, ids, context=context or {}):
res.setdefault(sheet.id, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
for period in sheet.period_ids:
res[sheet.id]['total_attendance'] += period.total_attendance
res[sheet.id]['total_timesheet'] += period.total_timesheet
res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
raise osv.except_osv(('Warning!'),_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
            # If attendances are passed, sort them by date ascending before writing, to satisfy the sign in/sign out alternation constraint.
            # In addition to the date order, attendance deletions are processed before insertions.
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
                    raise osv.except_osv(_('Warning!'), _('Sign in and sign out attendances must alternate.'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
date_attendances = []
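        # Each attendance tuple is a one2many command: (0, 0, vals) create,
        # (1, id, vals) update, (2, id) delete, (3, id) unlink, (4, id) link.
        # Build (priority, date, command) keys so deletions (priority 0) come
        # before creations/updates/links, which are then ordered by date.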
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
                    if att_tuple[2] and 'name' in att_tuple[2]:
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
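            # Confirm only when the attendance/timesheet difference stays within
            # the company-wide allowed threshold (0 disables the check).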
if (abs(sheet.total_difference) < di) or not di:
sheet.signal_workflow('confirm')
else:
raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
Timesheet = self.pool['hr.analytic.timesheet']
Attendance = self.pool['hr.attendance']
return {
sheet_id: {
'timesheet_activity_count': Timesheet.search_count(cr,uid, [('sheet_id','=', sheet_id)], context=context),
'attendance_count': Attendance.search_count(cr,uid, [('sheet_id', '=', sheet_id)], context=context)
}
for sheet_id in ids
}
_columns = {
'name': fields.char('Note', select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
            help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
                \n* The \'Confirmed\' status is used when the user confirms the timesheet. \
                \n* The \'Done\' status is used when the user\'s timesheet is accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True),
'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return time.strftime('%Y-%m-%d')
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return time.strftime('%Y-%m-%d')
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.user_id and sheet.user_id.id
if new_user_id:
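                # Two timesheets overlap when each one starts on or before the
                # day the other one ends, for the same user.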
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
            elif sheet['total_attendance'] != 0.00:
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which has attendance entries.'))
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
if not empids:
return False
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
#get the default date (should be: today)
res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
#if we got the dates from and to from the timesheet and if the default date is in between, we use the default
#but if the default isn't included in those dates, we use the date start of the timesheet as default
if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
return res
return context.get('timesheet_date_from')
#if we don't get the dates from the timesheet, we return the default value from super()
return res
class account_analytic_account(osv.osv):
_inherit = "account.analytic.account"
def name_create(self, cr, uid, name, context=None):
if context is None:
context = {}
group_template_required = self.pool['res.users'].has_group(cr, uid, 'account_analytic_analysis.group_template_required')
if not context.get('default_use_timesheets') or group_template_required:
return super(account_analytic_account, self).name_create(cr, uid, name, context=context)
rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
class hr_timesheet_line(osv.osv):
_inherit = "hr.analytic.timesheet"
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for ts_line in self.browse(cursor, user, ids, context=context):
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
('employee_id.user_id', '=', ts_line.user_id.id)],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM hr_analytic_timesheet l
INNER JOIN account_analytic_line al
ON (l.line_id = al.id)
WHERE %(date_to)s >= al.date
AND %(date_from)s <= al.date
AND %(user_id)s = al.user_id
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
def _get_account_analytic_line(self, cr, uid, ids, context=None):
ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
},
),
}
def _check_sheet_state(self, cr, uid, ids, context=None):
if context is None:
context = {}
for timesheet_line in self.browse(cr, uid, ids, context=context):
if timesheet_line.sheet_id and timesheet_line.sheet_id.state not in ('draft', 'new'):
return False
return True
_constraints = [
        (_check_sheet_state, 'You cannot modify an entry in a Confirmed/Done timesheet!', ['state']),
]
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
return True
    def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
        if context is None:
            context = {}
        return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
WHERE %(date_to)s >= date_trunc('day', a.name)
AND %(date_from)s <= a.name
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
""" Simulate timesheet in employee timezone
Return the attendance date in string format in the employee
tz converted from utc timezone as we consider date of employee
timesheet is in employee timezone
"""
employee_obj = self.pool['hr.employee']
tz = False
if employee_id:
employee = employee_obj.browse(cr, uid, employee_id, context=context)
tz = employee.user_id.partner_id.tz
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz = timezone(tz or 'utc')
attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_dt = pytz.utc.localize(attendance_dt)
att_tz_dt = att_tz_dt.astimezone(att_tz)
        # We keep only the date, omitting the hours, because we compare it with
        # the timesheet date_from, which is a date field; keeping the hours
        # could push the value out of the timesheet period.
att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
return att_tz_date_str
def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
sheet_obj = self.pool['hr_timesheet_sheet.sheet']
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, employee_id,
date=date, context=context)
sheet_ids = sheet_obj.search(cr, uid,
[('date_from', '<=', att_tz_date_str),
('date_to', '>=', att_tz_date_str),
('employee_id', '=', employee_id)],
limit=1, context=context)
return sheet_ids and sheet_ids[0] or False
def _sheet(self, cursor, user, ids, name, args, context=None):
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
res[attendance.id] = self._get_current_sheet(
cursor, user, attendance.employee_id.id, attendance.name,
context=context)
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
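        # Reuse the sheet id passed through the context, otherwise look up the
        # timesheet covering the attendance date for this employee.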
sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
if sheet_id:
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, vals.get('employee_id'),
date=vals.get('name'), context=context)
ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
if ts.state not in ('draft', 'new'):
                raise osv.except_osv(_('Error!'), _('You cannot enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
            elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
                raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance date outside the current timesheet dates.'))
return super(hr_attendance,self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
return True
class hr_timesheet_sheet_sheet_day(osv.osv):
_name = "hr_timesheet_sheet.sheet.day"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.date('Date', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
'total_timesheet': fields.float('Total Timesheet', readonly=True),
'total_attendance': fields.float('Attendance', readonly=True),
'total_difference': fields.float('Difference', readonly=True),
}
_depends = {
'account.analytic.line': ['date', 'unit_amount'],
'hr.analytic.timesheet': ['line_id', 'sheet_id'],
'hr.attendance': ['action', 'name', 'sheet_id'],
}
def init(self, cr):
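        # SQL view aggregating, per day and per sheet, the timesheet hours and
        # the attendance time. Attendance minutes are summed with sign_in as
        # negative and sign_out as positive; a negative total means the
        # employee is still signed in, so the time elapsed up to now (or the
        # full day for past dates) is added before converting to hours.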
cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
SELECT
id,
name,
sheet_id,
total_timesheet,
total_attendance,
cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
FROM
((
SELECT
MAX(id) as id,
name,
sheet_id,
SUM(total_timesheet) as total_timesheet,
CASE WHEN SUM(total_attendance) < 0
THEN (SUM(total_attendance) +
CASE WHEN current_date <> name
THEN 1440
ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC') * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC')
END
)
ELSE SUM(total_attendance)
END /60 as total_attendance
FROM
((
select
min(hrt.id) as id,
l.date::date as name,
s.id as sheet_id,
sum(l.unit_amount) as total_timesheet,
0.0 as total_attendance
from
hr_analytic_timesheet hrt
JOIN account_analytic_line l ON l.id = hrt.line_id
LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
group by l.date::date, s.id
) union (
select
-min(a.id) as id,
a.name::date as name,
s.id as sheet_id,
0.0 as total_timesheet,
SUM(((EXTRACT(hour FROM a.name) * 60) + EXTRACT(minute FROM a.name)) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
from
hr_attendance a
LEFT JOIN hr_timesheet_sheet_sheet s
ON s.id = a.sheet_id
WHERE action in ('sign_in', 'sign_out')
group by a.name::date, s.id
)) AS foo
GROUP BY name, sheet_id
)) AS bar""")
class hr_timesheet_sheet_sheet_account(osv.osv):
_name = "hr_timesheet_sheet.sheet.account"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
'total': fields.float('Total Time', digits=(16,2), readonly=True),
'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
}
_depends = {
'account.analytic.line': ['account_id', 'date', 'to_invoice', 'unit_amount', 'user_id'],
'hr.analytic.timesheet': ['line_id'],
'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'],
}
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
select
min(hrt.id) as id,
l.account_id as name,
s.id as sheet_id,
sum(l.unit_amount) as total,
l.to_invoice as invoice_rate
from
hr_analytic_timesheet hrt
left join (account_analytic_line l
LEFT JOIN hr_timesheet_sheet_sheet s
ON (s.date_to >= l.date
AND s.date_from <= l.date
AND s.user_id = l.user_id))
on (l.id = hrt.line_id)
group by l.account_id, s.id, l.to_invoice
)""")
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'timesheet_range': fields.selection(
[('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
help="Periodicity on which you validate your timesheets."),
'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)',
help="Allowed difference in hours between the sign in/out and the timesheet " \
"computation for one sheet. Set this to 0 if you do not want any control."),
}
_defaults = {
'timesheet_range': lambda *args: 'week',
'timesheet_max_difference': lambda *args: 0.0
}
class hr_employee(osv.osv):
'''
Employee
'''
_inherit = 'hr.employee'
_description = 'Employee'
def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None):
Sheet = self.pool['hr_timesheet_sheet.sheet']
return {
employee_id: Sheet.search_count(cr,uid, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -1,013,306,363,321,960,200 | -3,791,370,034,261,677,600 | 49.480712 | 290 | 0.563161 | false |
dreikanter/public-static
|
publicstatic/conf.py
|
1
|
6519
|
# coding: utf-8
"""Configuration-related fuctionality and defaults."""
import codecs
from datetime import datetime
import os
import yaml
from publicstatic import const
from publicstatic import errors
from publicstatic.version import __version__
_params = {} # Configuration parameters
_path = '' # Configuration file absolute path
class NotFoundException(errors.BasicException):
"""configuration file not found"""
pass
class ParsingError(errors.BasicException):
"""error reading configuration file"""
pass
class ConfigurationExistsException(errors.BasicException):
"""configuration file already exists; use --force to overwrite"""
pass
class NotInitializedException(errors.BasicException):
"""configuration was not initialized"""
pass
def path():
if not _path:
raise NotInitializedException()
return _path
def defaults():
"""Returns default configuration."""
return {key: value['value'] for key, value in const.DEFAULTS.items()}
def load(conf_path):
"""Initializes configuration."""
global _path
_path = find_conf(conf_path or '.')
if not _path:
raise NotFoundException()
try:
with codecs.open(_path, mode='r', encoding='utf-8') as f:
            loaded = yaml.safe_load(f.read())
except (IOError, OSError, yaml.scanner.ScannerError) as ex:
raise ParsingError(error=str(ex)) from ex
global _params
_params = defaults()
_params.update(dict((item, loaded[item]) for item in loaded))
_params = _purify(_params)
def generate(conf_path, force):
"""Generates new configuration file using defaults."""
global _path
_path = os.path.join(os.path.abspath(conf_path), const.CONF_NAME)
if not force and os.path.exists(_path):
raise ConfigurationExistsException(path=_path)
dir_path = os.path.dirname(path())
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
header = "# %s\n\n" % const.CONF_HEADER
exports = [opt for opt in const.DEFAULTS.keys() if opt in const.EXPORTS]
text = '\n'.join([_dumpopt(opt) for opt in exports])
with codecs.open(_path, mode='w', encoding='utf-8') as f:
f.write(header + text)
global _params
_params = _purify(defaults())
def find_conf(conf_path):
"""Walks from the specified directory path up to the root until
configuration file will be found. Returns full configuration file path
or None if there are no one."""
seps = os.path.sep + (os.path.altsep or '')
path = os.path.abspath(conf_path).rstrip(seps)
last = True
while last:
result = os.path.join(path, const.CONF_NAME)
if os.path.exists(result):
return result
path, last = os.path.split(path)
return None
def get(param, default=None):
"""Returns a single configuration parameter or default value."""
try:
return _params.get(param, default)
except TypeError:
raise NotInitializedException()
def set(param, value):
"""Set or override configuration parameter."""
_params[param] = value
def tags_rel_url():
return os.path.dirname(get('rel_root_url') + get('tag_location')) + '/'
def commons():
"""Site-wide environmental parameters for page building."""
return {
'root_url': get('root_url'),
'rel_root_url': get('rel_root_url'),
'site_title': get('title'),
'site_subtitle': get('subtitle'),
'menu': get('menu'),
'time': datetime.now(),
'author': get('author'),
'author_twitter': get('author_twitter'),
'author_url': get('author_url'),
'generator': const.GENERATOR,
'generator_url': const.GENERATOR_URL,
'generator_version': __version__,
'source_url': get('source_url'),
'enable_search_form': get('enable_search_form'),
'atom_url': get('root_url') + get('atom_location'),
'archive_rel_url': get('rel_root_url') + get('archive_location'),
'tags_rel_url': tags_rel_url(),
'sitemap_url': get('rel_root_url') + 'sitemap.xml',
'author_location': get('humans_author_location'),
'language': get('humans_language'),
'doctype': get('humans_doctype'),
'ide': get('humans_ide'),
'last_updated': datetime.now(),
'disqus_id': get('disqus_id'),
'addthis_id': get('addthis_id'),
'pluso_enabled': get('pluso_enabled'),
'google_analytics_id': get('google_analytics_id'),
'datetime_format': get('datetime_format'),
'date_format': get('date_format'),
'opengraph_enabled': get('opengraph_enabled'),
'twittercards_enabled': get('twittercards_enabled'),
'site_twitter': get('site_twitter'),
}
def _dumpopt(opt_name):
"""Serializes configuration option with default value."""
desc = const.DEFAULTS[opt_name]['desc']
desc = ("# %s\n" % desc) if desc else ''
return desc + yaml.dump({
opt_name: const.DEFAULTS[opt_name]['value']
}, width=79, indent=2, default_flow_style=False)
def _purify(params):
"""Preprocess configuration parameters."""
expandables = [
'build_path',
'log_file',
]
for param in expandables:
params[param] = _expand(params[param])
urls = [
'root_url',
'rel_root_url',
'source_url',
]
for param in urls:
params[param] = _trsl(params[param].strip())
integers = [
'port',
'log_max_size',
'log_backup_cnt',
]
for param in integers:
params[param] = int(params[param])
if isinstance(params['time_format'], str):
params['time_format'] = [params['time_format']]
menu = params['menu']
for item in menu:
item['href'] = item['href'].strip() if 'href' in item else ''
item['title'] = item['title'].strip() if 'title' in item else ''
params['verbose'] = params['verbose'] or const.ENV_VERBOSE in os.environ
return params
def _expand(rel_path):
"""Expands relative path using configuration file location as base
directory. Absolute pathes will be returned as is."""
path = os.path.expandvars(os.path.expanduser(rel_path))
if not os.path.isabs(path):
base = os.path.dirname(os.path.abspath(_path))
path = os.path.join(base, path)
seps = os.path.sep + (os.path.altsep or '')
return path.rstrip(seps)
def _trsl(url):
"""Guarantees the URL have a single trailing slash."""
return url.rstrip('/') + '/'
|
bsd-3-clause
| 8,217,475,228,982,800,000 | 2,682,505,168,928,747,000 | 27.845133 | 76 | 0.619574 | false |
midma101/m0du1ar
|
.venv/lib/python2.7/site-packages/pip/vcs/mercurial.py
|
280
|
4974
|
from __future__ import absolute_import
import logging
import os
import tempfile
import re
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip._vendor.six.moves import configparser
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
def export(self, location):
"""Export the Hg repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
self.run_command(
['archive', location], show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.SafeConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'], cwd=dest)
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['clone', '--noupdate', '-q', url, dest])
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = self.run_command(
['showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
def get_tag_revs(self, location):
tags = self.run_command(['tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
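            # Each line of `hg tags` looks like "<tag>   <rev>:<node>"; keep
            # every tag except the implicit "tip" tag.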
tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
if "tip" != tag:
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_branch_revs(self, location):
branches = self.run_command(
['branches'], show_stdout=False, cwd=location)
branch_revs = []
for line in branches.splitlines():
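            # Each line of `hg branches` looks like "<branch>   <rev>:<node>";
            # skip the "default" branch.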
branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if branches_match:
branch = branches_match.group(1)
rev = branches_match.group(2)
if "default" != branch:
branch_revs.append((rev.strip(), branch.strip()))
return dict(branch_revs)
def get_revision(self, location):
current_revision = self.run_command(
['parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = self.run_command(
['parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
current_rev_hash = self.get_revision_hash(location)
tag_revs = self.get_tag_revs(location)
branch_revs = self.get_branch_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
elif current_rev in branch_revs:
# It's the tip of a branch
full_egg_name = '%s-%s' % (
egg_project_name,
branch_revs[current_rev],
)
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name)
vcs.register(Mercurial)
|
mit
| -1,819,275,402,233,144,300 | -1,627,596,356,964,442,400 | 34.784173 | 79 | 0.543024 | false |
lmazuel/azure-sdk-for-python
|
azure-mgmt-commerce/azure/mgmt/commerce/models/recurring_charge.py
|
1
|
1457
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .offer_term_info import OfferTermInfo
class RecurringCharge(OfferTermInfo):
"""Indicates a recurring charge is present for this offer.
:param effective_date: Indicates the date from which the offer term is
effective.
:type effective_date: datetime
:param name: Constant filled by server.
:type name: str
:param recurring_charge: The amount of recurring charge as per the offer
term.
:type recurring_charge: int
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'effective_date': {'key': 'EffectiveDate', 'type': 'iso-8601'},
'name': {'key': 'Name', 'type': 'str'},
'recurring_charge': {'key': 'RecurringCharge', 'type': 'int'},
}
def __init__(self, effective_date=None, recurring_charge=None):
super(RecurringCharge, self).__init__(effective_date=effective_date)
self.recurring_charge = recurring_charge
self.name = 'Recurring Charge'
|
mit
| 366,044,002,688,673,500 | 7,686,794,068,643,346,000 | 34.536585 | 76 | 0.599176 | false |
js850/PyGMIN
|
examples/gui/NewLJ.py
|
1
|
3256
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'NewLJ.ui'
#
# Created: Thu May 10 03:10:06 2012
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_DialogLJSetup(object):
def setupUi(self, DialogLJSetup):
DialogLJSetup.setObjectName(_fromUtf8("DialogLJSetup"))
DialogLJSetup.resize(349, 144)
DialogLJSetup.setWindowTitle(QtGui.QApplication.translate("DialogLJSetup", "Create new Lennard-Jones system", None, QtGui.QApplication.UnicodeUTF8))
DialogLJSetup.setModal(True)
self.buttonBox = QtGui.QDialogButtonBox(DialogLJSetup)
self.buttonBox.setGeometry(QtCore.QRect(20, 100, 301, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayoutWidget = QtGui.QWidget(DialogLJSetup)
self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 20, 301, 61))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
self.label_2.setText(QtGui.QApplication.translate("DialogLJSetup", "Number of minima to save", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.lineNatoms = QtGui.QLineEdit(self.gridLayoutWidget)
self.lineNatoms.setInputMask(_fromUtf8(""))
self.lineNatoms.setText(QtGui.QApplication.translate("DialogLJSetup", "13", None, QtGui.QApplication.UnicodeUTF8))
self.lineNatoms.setObjectName(_fromUtf8("lineNatoms"))
self.gridLayout.addWidget(self.lineNatoms, 1, 1, 1, 1)
self.lineNsave = QtGui.QLineEdit(self.gridLayoutWidget)
self.lineNsave.setInputMask(QtGui.QApplication.translate("DialogLJSetup", "999; ", None, QtGui.QApplication.UnicodeUTF8))
self.lineNsave.setText(QtGui.QApplication.translate("DialogLJSetup", "50", None, QtGui.QApplication.UnicodeUTF8))
self.lineNsave.setObjectName(_fromUtf8("lineNsave"))
self.gridLayout.addWidget(self.lineNsave, 2, 1, 1, 1)
self.label = QtGui.QLabel(self.gridLayoutWidget)
self.label.setText(QtGui.QApplication.translate("DialogLJSetup", "Number of particles", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.retranslateUi(DialogLJSetup)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), DialogLJSetup.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), DialogLJSetup.reject)
QtCore.QMetaObject.connectSlotsByName(DialogLJSetup)
def retranslateUi(self, DialogLJSetup):
pass
|
gpl-3.0
| -4,320,922,121,054,890,000 | 1,799,615,470,851,129,900 | 53.266667 | 156 | 0.722052 | false |
AlexStarov/Shop
|
applications/sms_ussd/migrations/0001_initial.py
|
1
|
2850
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SendSMS',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sessionid', models.CharField(max_length=32, null=True, verbose_name='SessionID', blank=True)),
('task_id', models.CharField(max_length=255, null=True, verbose_name='task id', blank=True)),
('send', models.BooleanField(default=False, verbose_name='\u041e\u0442\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u043e')),
('code', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='\u041a\u043e\u0434 \u043f\u0440\u043e\u0432\u0430\u0439\u0434\u0435\u0440\u0430', choices=[(39, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80 (Golden Telecom)'), (50, b'Vodafone'), (63, b'Life:)'), (66, b'Vodafone'), (67, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (68, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80 (Beeline)'), (91, b'Utel'), (92, b'PEOPLEnet'), (93, b'Life:)'), (94, b'\xd0\x98\xd0\xbd\xd1\x82\xd0\xb5\xd1\x80\xd1\x82\xd0\xb5\xd0\xbb\xd0\xb5\xd0\xba\xd0\xbe\xd0\xbc'), (95, b'Vodafone'), (96, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (97, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (98, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (99, b'Vodafone')])),
('phone', models.CharField(max_length=7, null=True, verbose_name='\u0422\u0435\u043b\u0435\u0444\u043e\u043d', blank=True)),
('message', models.TextField(null=True, verbose_name='\u0421\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
('user', models.ForeignKey(verbose_name='\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'SMS_USSD_SendSMS',
'verbose_name': 'SendSMS',
'verbose_name_plural': 'SendSMS',
},
),
]
|
apache-2.0
| 9,100,890,636,956,477,000 | -5,348,395,205,430,188,000 | 78.166667 | 917 | 0.638947 | false |
linglung/ytdl
|
youtube_dl/extractor/dw.py
|
84
|
4098
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
from ..compat import compat_urlparse
class DWIE(InfoExtractor):
IE_NAME = 'dw'
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+(?:av|e)-(?P<id>\d+)'
_TESTS = [{
# video
'url': 'http://www.dw.com/en/intelligent-light/av-19112290',
'md5': '7372046e1815c5a534b43f3c3c36e6e9',
'info_dict': {
'id': '19112290',
'ext': 'mp4',
'title': 'Intelligent light',
'description': 'md5:90e00d5881719f2a6a5827cb74985af1',
'upload_date': '20160311',
}
}, {
# audio
'url': 'http://www.dw.com/en/worldlink-my-business/av-19111941',
'md5': '2814c9a1321c3a51f8a7aeb067a360dd',
'info_dict': {
'id': '19111941',
'ext': 'mp3',
'title': 'WorldLink: My business',
'description': 'md5:bc9ca6e4e063361e21c920c53af12405',
'upload_date': '20160311',
}
}, {
# DW documentaries, only last for one or two weeks
'url': 'http://www.dw.com/en/documentaries-welcome-to-the-90s-2016-05-21/e-19220158-9798',
'md5': '56b6214ef463bfb9a3b71aeb886f3cf1',
'info_dict': {
'id': '19274438',
'ext': 'mp4',
'title': 'Welcome to the 90s – Hip Hop',
'description': 'Welcome to the 90s - The Golden Decade of Hip Hop',
'upload_date': '20160521',
},
'skip': 'Video removed',
}]
def _real_extract(self, url):
media_id = self._match_id(url)
webpage = self._download_webpage(url, media_id)
hidden_inputs = self._hidden_inputs(webpage)
title = hidden_inputs['media_title']
media_id = hidden_inputs.get('media_id') or media_id
if hidden_inputs.get('player_type') == 'video' and hidden_inputs.get('stream_file') == '1':
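            # Videos are described by a SMIL manifest; rewrite the RTMP stream
            # URLs it contains to their direct HTTP download equivalents.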
formats = self._extract_smil_formats(
'http://www.dw.com/smil/v-%s' % media_id, media_id,
transform_source=lambda s: s.replace(
'rtmp://tv-od.dw.de/flash/',
'http://tv-download.dw.de/dwtv_video/flv/'))
self._sort_formats(formats)
else:
formats = [{'url': hidden_inputs['file_name']}]
upload_date = hidden_inputs.get('display_date')
if not upload_date:
upload_date = self._html_search_regex(
r'<span[^>]+class="date">([0-9.]+)\s*\|', webpage,
'upload date', default=None)
upload_date = unified_strdate(upload_date)
return {
'id': media_id,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': hidden_inputs.get('preview_image'),
'duration': int_or_none(hidden_inputs.get('file_duration')),
'upload_date': upload_date,
'formats': formats,
}
class DWArticleIE(InfoExtractor):
IE_NAME = 'dw:article'
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)'
_TEST = {
'url': 'http://www.dw.com/en/no-hope-limited-options-for-refugees-in-idomeni/a-19111009',
'md5': '8ca657f9d068bbef74d6fc38b97fc869',
'info_dict': {
'id': '19105868',
'ext': 'mp4',
'title': 'The harsh life of refugees in Idomeni',
'description': 'md5:196015cc7e48ebf474db9399420043c7',
'upload_date': '20160310',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
hidden_inputs = self._hidden_inputs(webpage)
media_id = hidden_inputs['media_id']
media_path = self._search_regex(r'href="([^"]+av-%s)"\s+class="overlayLink"' % media_id, webpage, 'media url')
media_url = compat_urlparse.urljoin(url, media_path)
return self.url_result(media_url, 'DW', media_id)
|
unlicense
| -3,556,489,221,330,733,600 | -7,800,059,459,209,359,000 | 36.925926 | 118 | 0.543945 | false |
motion2015/a3
|
common/lib/xmodule/xmodule/modulestore/perf_tests/generate_report.py
|
194
|
10493
|
"""
Reads the data generated by performance tests and generates a savable
report which can be viewed over time to examine the performance effects of code changes on
various parts of the system.
"""
import sqlite3
from lxml.builder import E
import lxml.html
try:
import click
except ImportError:
click = None
DB_NAME = 'block_times.db'
class HTMLTable(object):
"""
Simple wrapper for an HTML table.
"""
def __init__(self, hdr_columns):
self.table = E.TABLE()
col_headers = [E.TH(x) for x in hdr_columns]
header_row = E.TR(*col_headers)
self.table.append(header_row)
def add_row(self, items):
"""Add row to table."""
row_items = [E.TD(x) for x in items]
self.table.append(E.TR(*row_items))
def tostring(self):
"""Output table HTML as string."""
return lxml.html.tostring(self.table)
@staticmethod
def style():
""" Return a hard-coded table style."""
return E.style("""
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
}"""
) # pylint: disable=bad-continuation
class HTMLDocument(object):
"""
Simple wrapper for an entire HTML document.
"""
def __init__(self, title):
self.html = E.html(E.head(E.title(title), HTMLTable.style()))
self.body = E.body()
self.html.append(self.body)
def add_header(self, level, text):
"""Add a header to the document."""
func_name = "H{}".format(level)
self.body.append(getattr(E, func_name)(text))
def add_to_body(self, elem):
"""Add to document body."""
self.body.append(elem)
def tostring(self, pretty_print=False):
"""Output HTML document as string."""
return lxml.html.tostring(self.html, pretty_print=pretty_print)
class ReportGenerator(object):
"""
Base class for report generation.
"""
def __init__(self, db_name):
# Read data from all modulestore combos.
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
sel_sql = 'select id, run_id, block_desc, elapsed, timestamp FROM block_times ORDER BY run_id DESC'
cur = conn.cursor()
cur.execute(sel_sql)
self.all_rows = cur.fetchall()
class ImportExportReportGen(ReportGenerator):
"""
Class which generates report for course import/export performance test data.
"""
def __init__(self, db_name):
super(ImportExportReportGen, self).__init__(db_name)
self._read_timing_data()
def _read_timing_data(self):
"""
Read in the timing data from the sqlite DB and save into a dict.
"""
self.run_data = {}
self.all_modulestore_combos = set()
for row in self.all_rows:
time_taken = row[3]
# Split apart the description into its parts.
desc_parts = row[2].split(':')
modulestores = desc_parts[1]
self.all_modulestore_combos.add(modulestores)
amount_md = desc_parts[2]
test_phase = 'all'
if len(desc_parts) > 3:
test_phase = desc_parts[3]
# Save the data in a multi-level dict - { phase1: { amount1: {ms1->ms2: duration, ...}, ...}, ...}.
phase_data = self.run_data.setdefault(test_phase, {})
amount_data = phase_data.setdefault(amount_md, {})
__ = amount_data.setdefault(modulestores, time_taken)
def generate_html(self):
"""
Generate HTML.
"""
html = HTMLDocument("Results")
# Output comparison of each phase to a different table.
for phase in self.run_data.keys():
if phase in ('fake_assets',):
continue
per_phase = self.run_data[phase]
html.add_header(1, phase)
title_map = {
'duration': 'Total Duration (ms)',
'ratio': 'Total Duration Per Number of Assets (ms/asset)',
'variable_cost': 'Asset Export Duration Per Number of Assets (ms/asset)'
}
for table_type in ('duration', 'ratio', 'variable_cost'):
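                # 'duration' is the raw time, 'ratio' is time per asset, and
                # 'variable_cost' is the per-asset time after subtracting the
                # fixed cost measured with zero assets.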
if phase == 'all' and table_type in ('ratio', 'variable_cost'):
continue
# Make the table header columns and the table.
columns = ["Asset Metadata Amount", ]
ms_keys = sorted(self.all_modulestore_combos)
for k in ms_keys:
columns.append("{} ({})".format(k, table_type))
phase_table = HTMLTable(columns)
# Make a row for each amount of asset metadata.
for amount in sorted(per_phase.keys()):
per_amount = per_phase[amount]
num_assets = int(amount)
row = [amount, ]
for modulestore in ms_keys:
if table_type == 'duration':
value = per_amount[modulestore]
elif table_type == 'ratio':
if num_assets != 0:
value = per_amount[modulestore] / float(amount)
else:
value = 0
elif table_type == 'variable_cost':
if num_assets == 0:
value = 0
else:
value = (per_amount[modulestore] - per_phase['0'][modulestore]) / float(amount)
row.append("{}".format(value))
phase_table.add_row(row)
# Add the table title and the table.
html.add_header(2, title_map[table_type])
html.add_to_body(phase_table.table)
return html
class FindReportGen(ReportGenerator):
"""
Class which generates report for asset access performance test data.
"""
def __init__(self, db_name):
super(FindReportGen, self).__init__(db_name)
self._read_timing_data()
def _read_timing_data(self):
"""
Read in the timing data from the sqlite DB and save into a dict.
"""
self.run_data = {}
self.all_modulestores = set()
for row in self.all_rows:
time_taken = row[3]
# Split apart the description into its parts.
desc_parts = row[2].split(':')
if desc_parts[0] != 'FindAssetTest':
continue
modulestore, amount_md = desc_parts[1:3]
self.all_modulestores.add(modulestore)
test_phase = 'all'
sort = None
if len(desc_parts) >= 4:
test_phase = desc_parts[3]
if len(desc_parts) >= 5:
sort = desc_parts[4]
# Save the data in a multi-level dict:
# { phase1: { [sort1: {] amount1: { modulestore1: duration, ...}, ...}, ...}.
phase_data = self.run_data.setdefault(test_phase, {})
if test_phase == 'get_asset_list':
# Add a level here for the sort.
phase_data = phase_data.setdefault(sort, {})
amount_data = phase_data.setdefault(amount_md, {})
__ = amount_data.setdefault(modulestore, time_taken)
def generate_html(self):
"""
Generate HTML.
"""
html = HTMLDocument("Results")
# Output comparison of each phase to a different table.
# for store in self.run_data.keys():
# per_phase = self.run_data[store]
# html.add_header(1, store)
for phase in self.run_data.keys():
per_phase = self.run_data[phase]
# Make the table header columns and the table.
columns = ["Asset Metadata Amount", ]
ms_keys = sorted(self.all_modulestores)
for k in ms_keys:
columns.append("Time Taken (ms) ({})".format(k))
phase_table = HTMLTable(columns)
if phase != 'get_asset_list':
for amount in sorted(per_phase.keys()):
per_amount = per_phase[amount]
row = [amount, ]
for modulestore in ms_keys:
time_taken = per_amount[modulestore]
row.append("{}".format(time_taken))
phase_table.add_row(row)
html.add_header(2, phase)
html.add_to_body(phase_table.table)
else:
# get_asset_list phase includes the sort as well.
html.add_header(2, phase)
for sort in per_phase.keys():
sort_table = HTMLTable(columns)
per_sort = per_phase[sort]
for amount in sorted(per_sort.keys()):
per_amount = per_sort[amount]
row = [amount, ]
for modulestore in ms_keys:
# Each sort has two different ranges retrieved.
time_taken = per_amount[modulestore] / 2.0
row.append("{}".format(time_taken))
sort_table.add_row(row)
html.add_header(3, sort)
html.add_to_body(sort_table.table)
return html
if click is not None:
@click.command()
@click.argument('outfile', type=click.File('w'), default='-', required=False)
@click.option('--db_name', help='Name of sqlite database from which to read data.', default=DB_NAME)
@click.option('--data_type', help='Data type to process. One of: "imp_exp" or "find"', default="find")
def cli(outfile, db_name, data_type):
"""
Generate an HTML report from the sqlite timing data.
"""
if data_type == 'imp_exp':
ie_gen = ImportExportReportGen(db_name)
html = ie_gen.generate_html()
elif data_type == 'find':
f_gen = FindReportGen(db_name)
html = f_gen.generate_html()
click.echo(html.tostring(), file=outfile)
if __name__ == '__main__':
if click is not None:
cli() # pylint: disable=no-value-for-parameter
else:
print "Aborted! Module 'click' is not installed."
|
agpl-3.0
| 2,937,351,889,032,428,500 | -2,106,563,972,820,971,800 | 35.307958 | 111 | 0.521491 | false |
EKiefer/edge-starter
|
py34env/Lib/site-packages/pip/_vendor/packaging/specifiers.py
|
26
|
27825
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^\s]* # We just match everything, except for whitespace since this
# is a "legacy" specifier and the version string can be just
# about anything.
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
                # All other operators only allow a subset of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
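    # A few spec strings accepted by the regex above (illustrative only):
    #   "~=2.2"        compatible release clause
    #   "==1.4.*"      prefix match, only valid with the (non)equality operators
    #   "===foobar"    arbitrary string match via the identity operator
    #   ">=1.0.post1"  ordered comparison (no local versions or wildcards here)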
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
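    # e.g. with the prefix handling above, "==2.1.*" matches 2.1 and 2.1.5
    # (local segments ignored) but not 2.10.0, because the comparison is done
    # segment by segment rather than on the raw strings.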
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself
        # is a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
        # This special case is here so that, unless the specifier itself
        # is a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
        # If we've gotten to here, it means that the prospective version is both
        # greater than the spec version *and* not a post-release or local version
        # of the same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*; if it does, we
            # want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
            # Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
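# e.g. _version_split("1.0rc1") -> ["1", "0", "rc1"] and
# _version_split("2.1.3") -> ["2", "1", "3"]; the pre-release part becomes its
# own segment so the prefix comparison above can line segments up.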
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
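# e.g. _pad_version(["1", "2"], ["1", "2", "3"]) -> (["1", "2", "0"], ["1", "2", "3"]);
# only the numeric release segments are padded so both sides compare element-wise.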
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
        # Determine if we're forcing a prerelease or not; if we're not forcing
        # one for this particular call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
        # Determine if we're forcing a prerelease or not; if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
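# Illustrative usage of SpecifierSet, following the behaviour defined above:
#   specs = SpecifierSet(">=1.0,!=1.3.*,<2.0")
#   "1.4.2" in specs                                    -> True
#   specs.contains("1.3.1")                             -> False
#   list(specs.filter(["0.9", "1.2", "1.3.0", "1.9"]))  -> ["1.2", "1.9"]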
|
mit
| -7,214,918,645,558,197,000 | -2,170,086,487,247,109,600 | 34.856959 | 79 | 0.560072 | false |
cctaylor/googleads-python-lib
|
examples/dfp/v201411/line_item_service/get_all_line_items.py
|
4
|
1764
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line items.
To create line items, run create_line_items.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201411')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get line items by statement.
while True:
response = line_item_service.getLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for line_item in response['results']:
print ('Line item with id \'%s\', belonging to order id \'%s\', and '
'named \'%s\' was found.' %
(line_item['id'], line_item['orderId'], line_item['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
apache-2.0
| 8,019,001,752,225,954,000 | -6,769,257,238,438,736,000 | 31.666667 | 77 | 0.687642 | false |
740521985/nw.js
|
tools/aws_uploader.py
|
42
|
3658
|
#!/usr/bin/env python
import argparse
import boto
import datetime
import json
import os
import sys
import time
# Set timeout, for retry
#if not boto.config.has_section('Boto'):
# boto.config.add_section('Boto')
#boto.config.set('Boto','http_socket_timeout','30')
################################
# Parse command line args
parser = argparse.ArgumentParser(description='AWS uploader, please fill in your aws key and id in Boto config (~/.boto)')
parser.add_argument('-p','--path', help='Optional. Where to find the binaries, normally out/Release/dist', required=False)
parser.add_argument('-b','--buildername', help='Builder name, e.g. linux_32bit', required=True)
parser.add_argument('-r','--revision', help='Commit revision',required=True)
parser.add_argument('-n','--number', help='Build number', required=True)
parser.add_argument('-t','--bucket', help='AWS bucket name', required=True)
parser.add_argument('-d','--dlpath', help='AWS bucket path', required=True)
args = parser.parse_args()
################################
# Check and init variables
dist_dir = args.path
builder_name = args.buildername
got_revision = args.revision
build_number = args.number
bucket_name = args.bucket
dlpath = args.dlpath
date = datetime.date.today().strftime('%m-%d-%Y')
# If the binaries location is not given, calculate it from the script-relative dir.
if dist_dir == None:
dist_dir = os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, 'out', 'Release')
dist_dir = os.path.join(dist_dir, 'dist')
if not os.path.isabs(dist_dir):
dist_dir = os.path.join(os.getcwd(), dist_dir)
if not os.path.isdir(dist_dir):
print 'Invalid path: ' + dist_dir
exit(-1)
dist_dir = os.path.normpath(dist_dir)
# it's for S3, so always use '/' here
#upload_path = ''.join(['/' + date,
# '/' + builder_name + '-build-' + build_number + '-' + got_revision])
upload_path = '/' + dlpath;
file_list = os.listdir(dist_dir)
if len(file_list) == 0:
print 'Cannot find packages!'
exit(-1)
# move node-webkit- to the top of the list.
for i in range(len(file_list)):
fname = file_list[i]
if fname.startswith('nwjs-v') or fname.startswith('nwjs-sdk-v'):
del file_list[i]
file_list.insert(0,fname)
break
def print_progress(transmitted, total):
print ' %d%% transferred of total: %d bytes.' % (transmitted*100/total, total)
sys.stdout.flush()
def aws_upload(upload_path, file_list):
conn = boto.connect_s3()
print 'Connecting to S3 ...'
sys.stdout.flush()
bucket = conn.get_bucket(bucket_name)
print 'Uploading to: ' + upload_path
for f in file_list:
print 'Uploading "' + f + '" ...'
sys.stdout.flush()
# use '/' for s3
path_prefix = ''
if (f == 'nw.lib' or f == 'nw.exp') :
if builder_name != 'nw13_win64' and builder_name != 'nw13_win32' :
continue
if builder_name == 'nw13_win64' :
path_prefix = 'x64'
if f.startswith('nw-headers') and builder_name != 'nw13_mac64' :
continue
if f.startswith('chromedriver') and 'sdk' not in builder_name :
continue
key = bucket.new_key(upload_path + '/' + path_prefix + '/' + f)
key.set_contents_from_filename(filename=os.path.join(dist_dir, f), cb=print_progress, num_cb=50, replace=True)
for retry in range(3):
try:
aws_upload(upload_path, file_list)
break
except Exception, e:
print e
sys.stdout.flush()
time.sleep(30) #wait for 30s and try again.
print 'Done.'
# vim: et:ts=4:sw=4
|
mit
| 7,686,492,956,085,801,000 | -6,058,719,833,610,682,000 | 31.371681 | 122 | 0.617824 | false |
shermanng10/superathletebuilder
|
env/lib/python2.7/site-packages/setuptools/compat.py
|
456
|
2094
|
import sys
import itertools
PY3 = sys.version_info >= (3,)
PY2 = not PY3
if PY2:
basestring = basestring
import __builtin__ as builtins
import ConfigParser
from StringIO import StringIO
BytesIO = StringIO
func_code = lambda o: o.func_code
func_globals = lambda o: o.func_globals
im_func = lambda o: o.im_func
from htmlentitydefs import name2codepoint
import httplib
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import BaseHTTPRequestHandler
iteritems = lambda o: o.iteritems()
long_type = long
maxsize = sys.maxint
unichr = unichr
unicode = unicode
bytes = str
from urllib import url2pathname, splittag, pathname2url
import urllib2
from urllib2 import urlopen, HTTPError, URLError, unquote, splituser
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
filterfalse = itertools.ifilterfalse
exec("""def reraise(tp, value, tb=None):
raise tp, value, tb""")
if PY3:
basestring = str
import builtins
import configparser as ConfigParser
from io import StringIO, BytesIO
func_code = lambda o: o.__code__
func_globals = lambda o: o.__globals__
im_func = lambda o: o.__func__
from html.entities import name2codepoint
import http.client as httplib
from http.server import HTTPServer, SimpleHTTPRequestHandler
from http.server import BaseHTTPRequestHandler
iteritems = lambda o: o.items()
long_type = int
maxsize = sys.maxsize
unichr = chr
unicode = str
bytes = bytes
from urllib.error import HTTPError, URLError
import urllib.request as urllib2
from urllib.request import urlopen, url2pathname, pathname2url
from urllib.parse import (
urlparse, urlunparse, unquote, splituser, urljoin, urlsplit,
urlunsplit, splittag,
)
filterfalse = itertools.filterfalse
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
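# Typical use of reraise() inside an except block, to re-raise the active
# exception with its original traceback on both Python 2 and 3:
#     except Exception:
#         reraise(*sys.exc_info())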
|
mit
| 6,654,542,949,999,693,000 | 7,168,156,333,518,629,000 | 30.727273 | 76 | 0.69914 | false |
djeo94/CouchPotatoServer
|
couchpotato/core/notifications/pushover.py
|
45
|
2903
|
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Pushover'
class Pushover(Notification):
api_url = 'https://api.pushover.net'
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
api_data = {
'user': self.conf('user_key'),
'token': self.conf('api_token'),
'message': toUnicode(message),
'priority': self.conf('priority'),
'sound': self.conf('sound'),
}
if data and getIdentifier(data):
api_data.update({
'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
'url_title': toUnicode('%s on IMDb' % getTitle(data)),
})
try:
data = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'),
headers = {'Content-type': 'application/x-www-form-urlencoded'},
data = api_data)
log.info2('Pushover responded with: %s', data)
return True
except:
return False
config = [{
'name': 'pushover',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'pushover',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'user_key',
'description': 'Register on pushover.net to get one.'
},
{
'name': 'api_token',
'description': '<a href="https://pushover.net/apps/clone/couchpotato" target="_blank">Register on pushover.net</a> to get one.',
'advanced': True,
'default': 'YkxHMYDZp285L265L3IwH3LmzkTaCy',
},
{
'name': 'priority',
'default': 0,
'type': 'dropdown',
'values': [('Lowest', -2), ('Low', -1), ('Normal', 0), ('High', 1)],
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
{
'name': 'sound',
'advanced': True,
'description': 'Define <a href="https://pushover.net/api%23sounds" target="_blank">custom sound</a> for Pushover alert.'
},
],
}
],
}]
|
gpl-3.0
| -1,334,464,645,827,816,000 | -5,452,456,226,344,906,000 | 31.988636 | 148 | 0.448846 | false |
martenson/tools-iuc
|
tools/gemini/gemini_mafify.py
|
16
|
9077
|
import string
import sys
so_to_maf = {
'splice_acceptor_variant': 'Splice_Site',
'splice_donor_variant': 'Splice_Site',
'transcript_ablation': 'Splice_Site',
'exon_loss_variant': 'Splice_Site',
'stop_gained': 'Nonsense_Mutation',
'stop_lost': 'Nonstop_Mutation',
'frameshift_variant': 'Frame_Shift_',
'initiator_codon_variant': 'Translation_Start_Site',
'start_lost': 'Translation_Start_Site',
'inframe_insertion': 'In_Frame_Ins',
'inframe_deletion': 'In_Frame_Del',
'conservative_inframe_insertion': 'In_Frame_Ins',
'conservative_inframe_deletion': 'In_Frame_Del',
'disruptive_inframe_insertion': 'In_Frame_Ins',
'disruptive_inframe_deletion': 'In_Frame_Del',
'missense_variant': 'Missense_Mutation',
'coding_sequence_variant': 'Missense_Mutation',
'conservative_missense_variant': 'Missense_Mutation',
'rare_amino_acid_variant': 'Missense_Mutation',
'transcript_amplification': 'Intron',
'intron_variant': 'Intron',
'INTRAGENIC': 'Intron',
'intragenic_variant': 'Intron',
'splice_region_variant': 'Splice_Region',
'mature_miRNA_variant': 'RNA',
'exon_variant': 'RNA',
'non_coding_exon_variant': 'RNA',
'non_coding_transcript_exon_variant': 'RNA',
'non_coding_transcript_variant': 'RNA',
'nc_transcript_variant': 'RNA',
'stop_retained_variant': 'Silent',
'synonymous_variant': 'Silent',
'NMD_transcript_variant': 'Silent',
'incomplete_terminal_codon_variant': 'Silent',
'5_prime_UTR_variant': "5'UTR",
'5_prime_UTR_premature_start_codon_gain_variant': "5'UTR",
'3_prime_UTR_variant': "3'UTR",
'intergenic_variant': 'IGR',
'intergenic_region': 'IGR',
'regulatory_region_variant': 'IGR',
'regulatory_region': 'IGR',
'TF_binding_site_variant': 'IGR',
'upstream_gene_variant': "5'Flank",
'downstream_gene_variant': "3'Flank",
}
class VariantEffect():
def __init__(self, variant_type):
self.variant_type = variant_type.capitalize()
assert self.variant_type in ['Snp', 'Ins', 'Del']
def __getitem__(self, so_effect):
if so_effect not in so_to_maf or (
'frame' in so_effect and self.variant_type == 'Snp'
):
return 'Targeted_Region'
ret = so_to_maf[so_effect]
if ret == 'Frame_Shift_':
ret += self.variant_type
return ret
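# Example lookups, following the mapping rules above (illustrative):
#   VariantEffect('snp')['missense_variant']   -> 'Missense_Mutation'
#   VariantEffect('del')['frameshift_variant'] -> 'Frame_Shift_Del'
#   VariantEffect('snp')['frameshift_variant'] -> 'Targeted_Region' (frameshift on a SNP)
#   VariantEffect('ins')['unknown_so_term']    -> 'Targeted_Region' (effect not in so_to_maf)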
infile = sys.argv[1]
# Default the optional sample names so the checks below work when the
# corresponding command-line arguments are omitted.
tumor_sample_name = None
normal_sample_name = None
if len(sys.argv) > 2:
    tumor_sample_name = sys.argv[2]
if len(sys.argv) > 3:
    normal_sample_name = sys.argv[3]
start_pos_idx = None
ref_idx = None
alt_idx = None
variant_type_idx = None
variant_classification_idx = None
gt_alt_depths_idx = {}
gt_ref_depths_idx = {}
gts_idx = {}
samples = set()
required_fields = [
'Hugo_Symbol',
'NCBI_Build',
'Variant_Type',
'Variant_Classification',
'Tumor_Sample_Barcode',
'HGVSp_Short'
]
with open(infile) as data_in:
cols = data_in.readline().rstrip().split('\t')
for field in required_fields:
if field not in cols:
raise IndexError(
'Cannot generate valid MAF without the following input '
'columns: {0}.\n'
'Missing column: "{1}"'
.format(required_fields, field)
)
for i, col in enumerate(cols):
if col == 'Variant_Type':
variant_type_idx = i
elif col == 'Variant_Classification':
variant_classification_idx = i
elif col == 'Start_Position':
start_pos_idx = i
elif col == 'Reference_Allele':
ref_idx = i
elif col == 'alt':
alt_idx = i
else:
column, _, sample = col.partition('.')
if sample:
if column == 'gt_alt_depths':
gt_alt_depths_idx[sample] = i
elif column == 'gt_ref_depths':
gt_ref_depths_idx[sample] = i
elif column == 'gts':
gts_idx[sample] = i
else:
# not a recognized sample-specific column
continue
samples.add(sample)
if ref_idx is None:
raise IndexError('Input file does not have a column "Reference_Allele".')
if not tumor_sample_name:
if normal_sample_name:
raise ValueError(
'Normal sample name requires the tumor sample name to be '
'specified, too.'
)
if len(samples) > 1:
raise ValueError(
'A tumor sample name is required with more than one sample '
'in the input.'
)
if samples:
# There is a single sample with genotype data.
        # Assume it's the tumor sample.
tumor_sample_name = next(iter(samples))
else:
if tumor_sample_name not in samples:
raise ValueError(
'Could not find information about the specified tumor sample '
'in the input.'
)
if tumor_sample_name == normal_sample_name:
raise ValueError(
'Need different names for the normal and the tumor sample.'
)
if normal_sample_name and normal_sample_name not in samples:
raise ValueError(
'Could not find information about the specified normal sample '
'in the input.'
)
# All input data checks passed!
# Now extract just the relevant index numbers for the tumor/normal pair
gts_idx = (
gts_idx.get(tumor_sample_name, alt_idx),
gts_idx.get(normal_sample_name)
)
gt_alt_depths_idx = (
gt_alt_depths_idx.get(tumor_sample_name),
gt_alt_depths_idx.get(normal_sample_name)
)
gt_ref_depths_idx = (
gt_ref_depths_idx.get(tumor_sample_name),
gt_ref_depths_idx.get(normal_sample_name)
)
# Echo all MAF column names
cols_to_print = []
for n in range(len(cols)):
if n in gts_idx:
continue
if n in gt_alt_depths_idx:
continue
if n in gt_ref_depths_idx:
continue
if n != alt_idx:
cols_to_print.append(n)
print('\t'.join([cols[n] for n in cols_to_print]))
for line in data_in:
cols = line.rstrip().split('\t')
gt_alt_depths = [
int(cols[ad_idx]) if ad_idx else ''
for ad_idx in gt_alt_depths_idx
]
gt_ref_depths = [
int(cols[rd_idx]) if rd_idx else ''
for rd_idx in gt_ref_depths_idx
]
gts = [
['', ''],
['', '']
]
for n, gt_idx in enumerate(gts_idx):
if gt_idx:
gt_sep = '/' if '/' in cols[gt_idx] else '|'
allele1, _, allele2 = [
'' if allele == '.' else allele
for allele in cols[gt_idx].partition(gt_sep)
]
                # follow the cBioPortal recommendation to leave allele1 empty
                # when information is not available
if not allele2:
gts[n] = [allele2, allele1]
else:
gts[n] = [allele1, allele2]
if not gts:
gts = [['', ''], ['', '']]
if cols[variant_type_idx].lower() in ['ins', 'del']:
# transform VCF-style indel representations into MAF ones
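            # e.g. (illustrative) a deletion stored VCF-style as REF "TC" with
            # genotype "T/T" shares the leading "T"; the loop below strips that
            # common prefix, leaving Reference_Allele "C", tumor alleles "-",
            # and Start_Position shifted by one, per MAF convention.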
ref_allele = cols[ref_idx]
for n, nucs in enumerate(
zip(
ref_allele,
*[allele for gt in gts for allele in gt if allele]
)
):
if any(nuc != nucs[0] for nuc in nucs[1:]):
break
else:
n += 1
if n > 0:
cols[ref_idx] = cols[ref_idx][n:] or '-'
for gt in gts:
for idx, allele in enumerate(gt):
if allele:
gt[idx] = allele[n:] or '-'
if cols[ref_idx] == '-':
n -= 1
cols[start_pos_idx] = str(int(cols[start_pos_idx]) + n)
# in-place substitution of so_effect with MAF effect
cols[variant_classification_idx] = VariantEffect(
cols[variant_type_idx]
)[cols[variant_classification_idx]]
ret_line = '\t'.join([cols[n] for n in cols_to_print])
field_formatters = {
'tumor_seq_allele1': gts[0][0],
'tumor_seq_allele2': gts[0][1],
'match_norm_seq_allele1': gts[1][0],
'match_norm_seq_allele2': gts[1][1],
't_alt_count': gt_alt_depths[0],
'n_alt_count': gt_alt_depths[1],
't_ref_count': gt_ref_depths[0],
'n_ref_count': gt_ref_depths[1],
}
print(
# use safe_substitute here to avoid key errors with column content
# looking like unknown placeholders
string.Template(ret_line).safe_substitute(field_formatters)
)
|
mit
| -1,861,004,256,132,573,000 | -2,955,543,937,839,559,000 | 32.618519 | 81 | 0.532224 | false |
hmen89/odoo
|
addons/l10n_bo/__init__.py
|
2120
|
1456
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 8,608,872,332,028,404,000 | -6,638,083,503,499,404,000 | 44.5 | 80 | 0.684753 | false |
DeeDee22/nelliepi
|
src/ch/fluxkompensator/nelliepi/ui/screen/ListScreen.py
|
1
|
8571
|
'''
Created on Oct 4, 2013
@author: geraldine
'''
from ch.fluxkompensator.nelliepi.ui.screen.ScreenWithFooter import ScreenWithFooter
from ch.fluxkompensator.nelliepi.ui.widgets.TextButton import TextButton
from ch.fluxkompensator.nelliepi.Constants import RED
from ch.fluxkompensator.nelliepi.Constants import BLUE
from ch.fluxkompensator.nelliepi.functions import ListFunction
from ch.fluxkompensator.nelliepi.functions import PlayFunction
from ch.fluxkompensator.nelliepi.music import Player
class ListScreen(ScreenWithFooter):
'''
classdocs
'''
fileList = None
directoryName = None
level = 0
parentDirectories = None
PADDING = 5
NUMBER_OF_POSSIBLE_ROWS = 8
def __init__(self):
'''
Constructor
'''
ScreenWithFooter.__init__(self, "listScreen")
def setDirectoryToList(self, pDirectoryToList, pStartIndex=0, pParentDirectories=[]):
if pParentDirectories is None :
pParentDirectories = []
print("start index is " + str(pStartIndex))
dirString = "None"
if not pDirectoryToList is None:
dirString = pDirectoryToList
print("pDirectoryToList is " + dirString)
parentDirString = "None"
if not len(pParentDirectories) == 0 :
parentDirString = pParentDirectories[len(pParentDirectories) - 1]
print("parent: " + parentDirString)
self.parentDirectories = pParentDirectories
self.directoryName = pDirectoryToList
self.fileList = self.getFileList(pDirectoryToList)
fontSize=self.getFontSize()
self.addUpButton()
self.addPrevButton(pStartIndex)
i = 0
maxIndex=self.getMaxIndex(pStartIndex, len(self.fileList))
print("max index is " + str(maxIndex))
for currentFile in self.fileList:
if i >= pStartIndex and i <= maxIndex:
if currentFile.has_key("directory"):
directoryName = self.extractFileName(currentFile["directory"])
#print("directory: " + directoryName)
height=self.getHeightForButton(self.getMaxHeight(), len(self.fileList), i, pStartIndex)
button = TextButton(10, height, ">", pColor=RED, pFontSize=fontSize, pMethod=PlayFunction.function, pParams=[currentFile["directory"]])
self.addButton(button)
button = TextButton(self.getMaxWidth() / 2, height, directoryName, pColor=RED, pFontSize=fontSize, pMethod=ListFunction.function, pParams=[currentFile["directory"], 0, self.getParentsOfChild(directoryName)])
self.addButton(button)
elif currentFile.has_key("file"):
fileName = self.extractFileName(currentFile["file"])
#print("file: " + fileName)
button = TextButton(self.getMaxWidth() / 2, self.getHeightForButton(self.getMaxHeight(), len(self.fileList), i, pStartIndex), fileName, pFontSize=fontSize, pMethod=PlayFunction.function, pParams=[currentFile["file"]])
self.addButton(button)
i=i+1
self.addNextButton(pStartIndex, maxIndex)
def extractFileName(self, fileWithPath):
index = fileWithPath.rfind("/")
return fileWithPath[index+1:]
def getHeightForPrevElement(self, pMaxHeight, pTotalNumber):
fontSize = self.getFontSize()
return fontSize / 2 + self.PADDING
def getHeightForButton(self, pMaxHeight, pTotalNumber, pIndex, pStartIndex):
relativeIndex = pIndex - pStartIndex
rowForPrevElement = 0
if pStartIndex > 0:
rowForPrevElement = 1
fontSize = self.getFontSize()
firstElement = fontSize / 2 + self.PADDING
rowHeight = fontSize + self.PADDING
return firstElement + relativeIndex * rowHeight + rowForPrevElement * rowHeight
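    # e.g. with the 20px font size and 5px PADDING used in this class, the
    # first visible row sits at y = 15 and each subsequent row 25px lower; one
    # extra row offset is added when a "previous" element occupies the top slot.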
def getFontSize(self):
return 20
def getMaxIndex(self, pStartIndex, pTotalNumber):
maxIndex=pStartIndex + self.NUMBER_OF_POSSIBLE_ROWS -1
if pTotalNumber > maxIndex + 1:
#in this case we need a "next" element
maxIndex = maxIndex -1
if pStartIndex > 0:
#in this case we need a "previous" element
maxIndex = maxIndex -1
return maxIndex
def addUpButton(self):
if not len(self.parentDirectories) < 1:
button = TextButton(self.getMaxWidth() -10, self.getHeightForPrevElement(self.getMaxHeight(), len(self.fileList)), "^", pColor=BLUE, pFontSize=self.getFontSize(), pMethod=ListFunction.function, pParams=[self.getDirectParent(), 0, self.getParentsOfParents()])
self.addButton(button)
def getDirectParent(self):
if len(self.parentDirectories) < 1:
return None
return self.parentDirectories[len(self.parentDirectories) -1]
def getParentsOfChild(self, dirNameOfThisScreen):
if self.parentDirectories is None:
return [self.directoryName]
else :
result = []
for currentDir in self.parentDirectories:
result.append(currentDir)
result.append(self.directoryName)
return result
def getParentsOfParents(self):
result = []
i=0
for parent in self.parentDirectories:
if i<len(self.parentDirectories) -1:
result.append(parent)
i=i+1
return result
def addPrevButton(self, pStartIndex):
if pStartIndex > 0:
nextStartIndex = pStartIndex - self.NUMBER_OF_POSSIBLE_ROWS
#next screen definitely has a next button
nextStartIndex = nextStartIndex + 1
#does previous screen have a previous button?
if nextStartIndex > 0:
nextStartIndex = nextStartIndex + 1
if nextStartIndex < 0:
nextStartIndex = 0
print("next start index for PrevButton: " + str(nextStartIndex))
button = TextButton(self.getMaxWidth() / 2, self.getHeightForPrevElement(self.getMaxHeight(), len(self.fileList)), "<previous>", pColor=BLUE, pFontSize=self.getFontSize(), pMethod=ListFunction.function, pParams=[self.directoryName, nextStartIndex, self.parentDirectories])
self.addButton(button)
def addNextButton(self, pStartIndex,pMaxIndex):
if len(self.fileList) > pMaxIndex + 1:
nextStartIndex=pMaxIndex + 1
print("next start index forNextButton: " + str(nextStartIndex))
fontSize=self.getFontSize()
button = TextButton(self.getMaxWidth() / 2, self.getHeightForButton(self.getMaxHeight(), len(self.fileList),pMaxIndex +1, pStartIndex), "<next>", pColor=BLUE, pFontSize=fontSize, pMethod=ListFunction.function, pParams=[self.directoryName, nextStartIndex, self.parentDirectories])
self.addButton(button)
def getFileList(self, pDirectoryName):
wholeList = None
if pDirectoryName == "_main_":
wholeList = Player.listFiles()
else :
wholeList = Player.listFiles(pDirectoryName)
result = []
for currentFile in wholeList:
fileName = None
if(currentFile.has_key("directory")):
fileName=currentFile["directory"]
else:
fileName=currentFile["file"]
if self.isDirectlyInDirectory(fileName):
result.append(currentFile);
return result
def isDirectlyInDirectory(self, fileName):
level = 0
if not len(self.parentDirectories) < 1:
level=self.getDirectParent().count("/") + 2
if self.getDirectParent() == "_main_":
level = 1
occurences = fileName.count("/")
#print("=====")
#parentDirString = "None"
#if not self.parentDirectories is None:
# for currentParent in self.parentDirectories:
# parentDirString = parentDirString + currentParent + ","
#print("parent: " + parentDirString + " fileName:" + fileName + " occurences: " + str(occurences) + " level: " + str(level))
#print("=====")
return occurences == level
|
gpl-2.0
| -5,045,380,185,694,866,000 | -2,274,020,171,535,813,400 | 43.180412 | 291 | 0.608447 | false |
jachitech/AndroidPrebuiltPackages
|
packages/libxml2-2.9.4/python/tests/relaxng.py
|
35
|
1203
|
#!/usr/bin/python -u
import libxml2
import sys
# Memory debug specific
libxml2.debugMemory(1)
schema="""<?xml version="1.0"?>
<element name="foo"
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/annotation/1.0"
xmlns:ex1="http://www.example.com/n1"
xmlns:ex2="http://www.example.com/n2">
<a:documentation>A foo element.</a:documentation>
<element name="ex1:bar1">
<empty/>
</element>
<element name="ex2:bar2">
<empty/>
</element>
</element>
"""
instance="""<?xml version="1.0"?>
<foo><pre1:bar1 xmlns:pre1="http://www.example.com/n1"/><pre2:bar2 xmlns:pre2="http://www.example.com/n2"/></foo>"""
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
ctxt = rngs.relaxNGNewValidCtxt()
doc = libxml2.parseDoc(instance)
ret = doc.relaxNGValidateDoc(ctxt)
if ret != 0:
print("error doing RelaxNG validation")
sys.exit(1)
doc.freeDoc()
del rngp
del rngs
del ctxt
libxml2.relaxNGCleanupTypes()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
|
apache-2.0
| 3,035,541,321,508,326,000 | -6,571,589,243,332,347,000 | 24.0625 | 116 | 0.674148 | false |
ran5515/DeepDecision
|
tensorflow/user_ops/invalid_op_test.py
|
146
|
1217
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
class InvalidOpTest(tf.test.TestCase):
def testBasic(self):
library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
'invalid_op.so')
with self.assertRaises(tf.errors.InvalidArgumentError):
tf.load_op_library(library_filename)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -6,144,057,259,986,820,000 | 731,058,316,280,071,200 | 33.771429 | 80 | 0.670501 | false |
crichardson17/starburst_atlas
|
Low_resolution_sims/DustFree_LowRes/Padova_inst/padova_inst_6/Optical1.py
|
33
|
7366
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
|
gpl-2.0
| 3,454,757,853,294,539,300 | -5,551,462,325,631,344,000 | 29.188525 | 203 | 0.594217 | false |
ict-felix/stack
|
vt_manager/src/python/vt_manager/communication/sfa/managers/AggregateManager.py
|
4
|
2850
|
from vt_manager.communication.sfa.util.version import version_core
from vt_manager.communication.sfa.util.xrn import Xrn
from vt_manager.communication.sfa.util.callids import Callids
from vt_manager.communication.sfa.drivers.VTSfaDriver import VTSfaDriver
from vt_manager.communication.sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
RecordNotFound, SfaNotImplemented, SliverDoesNotExist
import zlib
class AggregateManager:
''' SFA AM Class for VM_Manager'''
def __init__ (self, config=None):
self.driver = VTSfaDriver(None)
def ListSlices(self, api, creds, options):
raise Exception("External authorities do not have permissions to list OCF slices")
def ListResources(self, options):
slice_xrn = options.get('geni_slice_urn', None)
if slice_xrn:
xrn = Xrn(slice_xrn,'slice')
slice_leaf = xrn.get_leaf()
options['slice'] = slice_leaf
rspec = self.driver.list_resources(options)
if options.has_key('geni_compressed') and options['geni_compressed'] == True:
rspec = zlib.compress(rspec).encode('base64')
return rspec
def SliverStatus (self, xrn, options):
xrn = Xrn(xrn,'slice')
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.sliver_status(slice_leaf,authority,options)
def CreateSliver(self,xrn,rspec_string,users,creds,options):
xrn = Xrn(xrn, 'slice')
slice_leaf = xrn.get_leaf()
slice_hrn = xrn.get_hrn()
authority = xrn.get_authority_hrn()
expiration_date = self.driver.get_expiration(creds, slice_hrn)
return self.driver.create_sliver (slice_leaf,authority,rspec_string, users, options, expiration_date)
def DeleteSliver(self, xrn, options):
#TODO: Check the options or xrn to get a single vm.
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice(slice_leaf,authority,action='delete_slice')
def RenewSliver(self, xrn, expiration_time, options):
return True
def start_slice(self,xrn):
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice(slice_leaf,authority,action='start_slice')
def stop_slice(self,xrn):
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice (slice_leaf,authority,action='stop_slice')
def reset_slice(self,xrn):
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice (slice_leaf,authority,action='reset_slice')
def GetTicket(self, api, xrn, creds, rspec, users, options):
# ticket is dead.
raise SfaNotImplemented('Method GetTicket was deprecated.')
|
apache-2.0
| 5,322,721,614,450,610,000 | -753,043,093,704,336,400 | 34.625 | 109 | 0.68386 | false |
AntonPalich/sublime-evernote
|
lib/pygments/lexers/_robotframeworklexer.py
|
57
|
18610
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._robotframeworklexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Robot Framework.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Copyright 2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pygments.lexer import Lexer
from pygments.token import Token
from pygments.util import text_type
HEADING = Token.Generic.Heading
SETTING = Token.Keyword.Namespace
IMPORT = Token.Name.Namespace
TC_KW_NAME = Token.Generic.Subheading
KEYWORD = Token.Name.Function
ARGUMENT = Token.String
VARIABLE = Token.Name.Variable
COMMENT = Token.Comment
SEPARATOR = Token.Punctuation
SYNTAX = Token.Punctuation
GHERKIN = Token.Generic.Emph
ERROR = Token.Error
def normalize(string, remove=''):
string = string.lower()
for char in remove + ' ':
if char in string:
string = string.replace(char, '')
return string
class RobotFrameworkLexer(Lexer):
"""
For `Robot Framework <http://robotframework.org>`_ test data.
Supports both space and pipe separated plain text formats.
.. versionadded:: 1.6
"""
name = 'RobotFramework'
aliases = ['robotframework']
filenames = ['*.txt', '*.robot']
mimetypes = ['text/x-robotframework']
def __init__(self, **options):
options['tabsize'] = 2
options['encoding'] = 'UTF-8'
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
row_tokenizer = RowTokenizer()
var_tokenizer = VariableTokenizer()
index = 0
for row in text.splitlines():
for value, token in row_tokenizer.tokenize(row):
for value, token in var_tokenizer.tokenize(value, token):
if value:
yield index, token, text_type(value)
index += len(value)
class VariableTokenizer(object):
def tokenize(self, string, token):
var = VariableSplitter(string, identifiers='$@%')
if var.start < 0 or token in (COMMENT, ERROR):
yield string, token
return
for value, token in self._tokenize(var, string, token):
if value:
yield value, token
def _tokenize(self, var, string, orig_token):
before = string[:var.start]
yield before, orig_token
yield var.identifier + '{', SYNTAX
for value, token in self.tokenize(var.base, VARIABLE):
yield value, token
yield '}', SYNTAX
if var.index:
yield '[', SYNTAX
for value, token in self.tokenize(var.index, VARIABLE):
yield value, token
yield ']', SYNTAX
for value, token in self.tokenize(string[var.end:], orig_token):
yield value, token
class RowTokenizer(object):
def __init__(self):
self._table = UnknownTable()
self._splitter = RowSplitter()
testcases = TestCaseTable()
settings = SettingTable(testcases.set_default_template)
variables = VariableTable()
keywords = KeywordTable()
self._tables = {'settings': settings, 'setting': settings,
'metadata': settings,
'variables': variables, 'variable': variables,
'testcases': testcases, 'testcase': testcases,
'keywords': keywords, 'keyword': keywords,
'userkeywords': keywords, 'userkeyword': keywords}
def tokenize(self, row):
commented = False
heading = False
for index, value in enumerate(self._splitter.split(row)):
# First value, and every second after that, is a separator.
index, separator = divmod(index-1, 2)
if value.startswith('#'):
commented = True
elif index == 0 and value.startswith('*'):
self._table = self._start_table(value)
heading = True
for value, token in self._tokenize(value, index, commented,
separator, heading):
yield value, token
self._table.end_row()
def _start_table(self, header):
name = normalize(header, remove='*')
return self._tables.get(name, UnknownTable())
def _tokenize(self, value, index, commented, separator, heading):
if commented:
yield value, COMMENT
elif separator:
yield value, SEPARATOR
elif heading:
yield value, HEADING
else:
for value, token in self._table.tokenize(value, index):
yield value, token
class RowSplitter(object):
_space_splitter = re.compile('( {2,})')
_pipe_splitter = re.compile('((?:^| +)\|(?: +|$))')
def split(self, row):
splitter = (row.startswith('| ') and self._split_from_pipes
or self._split_from_spaces)
for value in splitter(row):
yield value
yield '\n'
def _split_from_spaces(self, row):
yield '' # Start with (pseudo)separator similarly as with pipes
for value in self._space_splitter.split(row):
yield value
def _split_from_pipes(self, row):
_, separator, rest = self._pipe_splitter.split(row, 1)
yield separator
while self._pipe_splitter.search(rest):
cell, separator, rest = self._pipe_splitter.split(rest, 1)
yield cell
yield separator
yield rest
class Tokenizer(object):
_tokens = None
def __init__(self):
self._index = 0
def tokenize(self, value):
values_and_tokens = self._tokenize(value, self._index)
self._index += 1
if isinstance(values_and_tokens, type(Token)):
values_and_tokens = [(value, values_and_tokens)]
return values_and_tokens
def _tokenize(self, value, index):
index = min(index, len(self._tokens) - 1)
return self._tokens[index]
def _is_assign(self, value):
if value.endswith('='):
value = value[:-1].strip()
var = VariableSplitter(value, identifiers='$@')
return var.start == 0 and var.end == len(value)
class Comment(Tokenizer):
_tokens = (COMMENT,)
class Setting(Tokenizer):
_tokens = (SETTING, ARGUMENT)
_keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
'suitepostcondition', 'testsetup', 'testprecondition',
'testteardown', 'testpostcondition', 'testtemplate')
_import_settings = ('library', 'resource', 'variables')
_other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
'testtimeout')
_custom_tokenizer = None
def __init__(self, template_setter=None):
Tokenizer.__init__(self)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 1 and self._template_setter:
self._template_setter(value)
if index == 0:
normalized = normalize(value)
if normalized in self._keyword_settings:
self._custom_tokenizer = KeywordCall(support_assign=False)
elif normalized in self._import_settings:
self._custom_tokenizer = ImportSetting()
elif normalized not in self._other_settings:
return ERROR
elif self._custom_tokenizer:
return self._custom_tokenizer.tokenize(value)
return Tokenizer._tokenize(self, value, index)
class ImportSetting(Tokenizer):
_tokens = (IMPORT, ARGUMENT)
class TestCaseSetting(Setting):
_keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
'template')
_import_settings = ()
_other_settings = ('documentation', 'tags', 'timeout')
def _tokenize(self, value, index):
if index == 0:
type = Setting._tokenize(self, value[1:-1], index)
return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)]
return Setting._tokenize(self, value, index)
class KeywordSetting(TestCaseSetting):
_keyword_settings = ('teardown',)
_other_settings = ('documentation', 'arguments', 'return', 'timeout')
class Variable(Tokenizer):
_tokens = (SYNTAX, ARGUMENT)
def _tokenize(self, value, index):
if index == 0 and not self._is_assign(value):
return ERROR
return Tokenizer._tokenize(self, value, index)
class KeywordCall(Tokenizer):
_tokens = (KEYWORD, ARGUMENT)
def __init__(self, support_assign=True):
Tokenizer.__init__(self)
self._keyword_found = not support_assign
self._assigns = 0
def _tokenize(self, value, index):
if not self._keyword_found and self._is_assign(value):
self._assigns += 1
return SYNTAX # VariableTokenizer tokenizes this later.
if self._keyword_found:
return Tokenizer._tokenize(self, value, index - self._assigns)
self._keyword_found = True
return GherkinTokenizer().tokenize(value, KEYWORD)
class GherkinTokenizer(object):
_gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE)
def tokenize(self, value, token):
match = self._gherkin_prefix.match(value)
if not match:
return [(value, token)]
end = match.end()
return [(value[:end], GHERKIN), (value[end:], token)]
class TemplatedKeywordCall(Tokenizer):
_tokens = (ARGUMENT,)
class ForLoop(Tokenizer):
def __init__(self):
Tokenizer.__init__(self)
self._in_arguments = False
def _tokenize(self, value, index):
token = self._in_arguments and ARGUMENT or SYNTAX
if value.upper() in ('IN', 'IN RANGE'):
self._in_arguments = True
return token
class _Table(object):
_tokenizer_class = None
def __init__(self, prev_tokenizer=None):
self._tokenizer = self._tokenizer_class()
self._prev_tokenizer = prev_tokenizer
self._prev_values_on_row = []
def tokenize(self, value, index):
if self._continues(value, index):
self._tokenizer = self._prev_tokenizer
yield value, SYNTAX
else:
for value_and_token in self._tokenize(value, index):
yield value_and_token
self._prev_values_on_row.append(value)
def _continues(self, value, index):
return value == '...' and all(self._is_empty(t)
for t in self._prev_values_on_row)
def _is_empty(self, value):
return value in ('', '\\')
def _tokenize(self, value, index):
return self._tokenizer.tokenize(value)
def end_row(self):
self.__init__(prev_tokenizer=self._tokenizer)
class UnknownTable(_Table):
_tokenizer_class = Comment
def _continues(self, value, index):
return False
class VariableTable(_Table):
_tokenizer_class = Variable
class SettingTable(_Table):
_tokenizer_class = Setting
def __init__(self, template_setter, prev_tokenizer=None):
_Table.__init__(self, prev_tokenizer)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 0 and normalize(value) == 'testtemplate':
self._tokenizer = Setting(self._template_setter)
return _Table._tokenize(self, value, index)
def end_row(self):
self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
class TestCaseTable(_Table):
_setting_class = TestCaseSetting
_test_template = None
_default_template = None
@property
def _tokenizer_class(self):
if self._test_template or (self._default_template and
self._test_template is not False):
return TemplatedKeywordCall
return KeywordCall
def _continues(self, value, index):
return index > 0 and _Table._continues(self, value, index)
def _tokenize(self, value, index):
if index == 0:
if value:
self._test_template = None
return GherkinTokenizer().tokenize(value, TC_KW_NAME)
if index == 1 and self._is_setting(value):
if self._is_template(value):
self._test_template = False
self._tokenizer = self._setting_class(self.set_test_template)
else:
self._tokenizer = self._setting_class()
if index == 1 and self._is_for_loop(value):
self._tokenizer = ForLoop()
if index == 1 and self._is_empty(value):
return [(value, SYNTAX)]
return _Table._tokenize(self, value, index)
def _is_setting(self, value):
return value.startswith('[') and value.endswith(']')
def _is_template(self, value):
return normalize(value) == '[template]'
def _is_for_loop(self, value):
return value.startswith(':') and normalize(value, remove=':') == 'for'
def set_test_template(self, template):
self._test_template = self._is_template_set(template)
def set_default_template(self, template):
self._default_template = self._is_template_set(template)
def _is_template_set(self, template):
return normalize(template) not in ('', '\\', 'none', '${empty}')
class KeywordTable(TestCaseTable):
_tokenizer_class = KeywordCall
_setting_class = KeywordSetting
def _is_template(self, value):
return False
# Following code copied directly from Robot Framework 2.7.5.
class VariableSplitter:
def __init__(self, string, identifiers):
self.identifier = None
self.base = None
self.index = None
self.start = -1
self.end = -1
self._identifiers = identifiers
self._may_have_internal_variables = False
try:
self._split(string)
except ValueError:
pass
else:
self._finalize()
def get_replaced_base(self, variables):
if self._may_have_internal_variables:
return variables.replace_string(self.base)
return self.base
def _finalize(self):
self.identifier = self._variable_chars[0]
self.base = ''.join(self._variable_chars[2:-1])
self.end = self.start + len(self._variable_chars)
if self._has_list_variable_index():
self.index = ''.join(self._list_variable_index_chars[1:-1])
self.end += len(self._list_variable_index_chars)
def _has_list_variable_index(self):
return self._list_variable_index_chars\
and self._list_variable_index_chars[-1] == ']'
def _split(self, string):
start_index, max_index = self._find_variable(string)
self.start = start_index
self._open_curly = 1
self._state = self._variable_state
self._variable_chars = [string[start_index], '{']
self._list_variable_index_chars = []
self._string = string
start_index += 2
for index, char in enumerate(string[start_index:]):
index += start_index # Giving start to enumerate only in Py 2.6+
try:
self._state(char, index)
except StopIteration:
return
if index == max_index and not self._scanning_list_variable_index():
return
def _scanning_list_variable_index(self):
return self._state in [self._waiting_list_variable_index_state,
self._list_variable_index_state]
def _find_variable(self, string):
max_end_index = string.rfind('}')
if max_end_index == -1:
raise ValueError('No variable end found')
if self._is_escaped(string, max_end_index):
return self._find_variable(string[:max_end_index])
start_index = self._find_start_index(string, 1, max_end_index)
if start_index == -1:
raise ValueError('No variable start found')
return start_index, max_end_index
def _find_start_index(self, string, start, end):
index = string.find('{', start, end) - 1
if index < 0:
return -1
if self._start_index_is_ok(string, index):
return index
return self._find_start_index(string, index+2, end)
def _start_index_is_ok(self, string, index):
return string[index] in self._identifiers\
and not self._is_escaped(string, index)
def _is_escaped(self, string, index):
escaped = False
while index > 0 and string[index-1] == '\\':
index -= 1
escaped = not escaped
return escaped
def _variable_state(self, char, index):
self._variable_chars.append(char)
if char == '}' and not self._is_escaped(self._string, index):
self._open_curly -= 1
if self._open_curly == 0:
if not self._is_list_variable():
raise StopIteration
self._state = self._waiting_list_variable_index_state
elif char in self._identifiers:
self._state = self._internal_variable_start_state
def _is_list_variable(self):
return self._variable_chars[0] == '@'
def _internal_variable_start_state(self, char, index):
self._state = self._variable_state
if char == '{':
self._variable_chars.append(char)
self._open_curly += 1
self._may_have_internal_variables = True
else:
self._variable_state(char, index)
def _waiting_list_variable_index_state(self, char, index):
if char != '[':
raise StopIteration
self._list_variable_index_chars.append(char)
self._state = self._list_variable_index_state
def _list_variable_index_state(self, char, index):
self._list_variable_index_chars.append(char)
if char == ']':
raise StopIteration
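# --- Illustrative usage (not part of the original module) ---
# A minimal, hypothetical sketch of running this lexer through Pygments,
# assuming Pygments is installed; the Robot Framework test data below is
# made up purely for demonstration.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = (
        "*** Test Cases ***\n"
        "Example Login\n"
        "    Given user opens browser\n"
        "    ${status} =    Login    alice    secret\n"
        "    Should Be Equal    ${status}    OK\n"
    )
    # The lexer tokenizes row by row; HEADING, TC_KW_NAME, KEYWORD and
    # VARIABLE tokens drive the highlighting.
    print(highlight(sample, RobotFrameworkLexer(), TerminalFormatter()))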
|
mit
| -5,229,617,512,509,113,000 | -7,156,538,214,843,051,000 | 32.351254 | 80 | 0.591349 | false |
andfoy/margffoy-tuay-server
|
env/lib/python2.7/site-packages/django/template/response.py
|
61
|
8910
|
import warnings
from django.http import HttpResponse
from django.template import Context, RequestContext, Template, loader
from django.template.backends.django import Template as BackendTemplate
from django.template.context import _current_app_undefined
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
if isinstance(template, Template):
warnings.warn(
"{}'s template argument cannot be a django.template.Template "
"anymore. It may be a backend-specific template like those "
"created by get_template().".format(self.__class__.__name__),
RemovedInDjango20Warning, stacklevel=2)
template = BackendTemplate(template)
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status, charset)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return loader.select_template(template, using=self.using)
elif isinstance(template, six.string_types):
return loader.get_template(template, using=self.using)
else:
return template
def _resolve_template(self, template):
# This wrapper deprecates returning a django.template.Template in
# subclasses that override resolve_template. It can be removed in
# Django 2.0.
new_template = self.resolve_template(template)
if isinstance(new_template, Template):
warnings.warn(
"{}.resolve_template() must return a backend-specific "
"template like those created by get_template(), not a "
"{}.".format(
self.__class__.__name__, new_template.__class__.__name__),
RemovedInDjango20Warning, stacklevel=2)
new_template = BackendTemplate(new_template)
return new_template
def resolve_context(self, context):
return context
def _resolve_context(self, context):
# This wrapper deprecates returning a Context or a RequestContext in
# subclasses that override resolve_context. It can be removed in
# Django 2.0. If returning a Context or a RequestContext works by
# accident, it won't be an issue per se, but it won't be officially
# supported either.
new_context = self.resolve_context(context)
if isinstance(new_context, RequestContext) and self._request is None:
self._request = new_context.request
if isinstance(new_context, Context):
warnings.warn(
"{}.resolve_context() must return a dict, not a {}.".format(
self.__class__.__name__, new_context.__class__.__name__),
RemovedInDjango20Warning, stacklevel=2)
# It would be tempting to do new_context = new_context.flatten()
# here but that would cause template context processors to run for
# TemplateResponse(request, template, Context({})), which would be
# backwards-incompatible. As a consequence another deprecation
# warning will be raised when rendering the template. There isn't
# much we can do about that.
return new_context
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self._resolve_template(self.template_name)
context = self._resolve_context(self.context_data)
content = template.render(context, self._request)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request', '_current_app']
def __init__(self, request, template, context=None, content_type=None,
status=None, current_app=_current_app_undefined, charset=None,
using=None):
# As a convenience we'll allow callers to provide current_app without
# having to avoid needing to create the RequestContext directly
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of TemplateResponse is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango20Warning, stacklevel=2)
request.current_app = current_app
super(TemplateResponse, self).__init__(
template, context, content_type, status, charset, using)
self._request = request
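# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of a view returning a TemplateResponse with a post-render
# callback; the view name, template path and header are hypothetical, and a
# configured Django project is assumed when the view is actually invoked.
def _example_item_detail(request):
    response = TemplateResponse(request, 'myapp/detail.html', {'name': 'example'})
    def _remember_rendered_size(resp):
        # Runs once render() has baked the template into resp.content.
        resp['X-Body-Length'] = str(len(resp.content))
    response.add_post_render_callback(_remember_rendered_size)
    return response  # the response middleware calls render() later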
|
gpl-2.0
| 6,365,628,553,481,261,000 | -7,285,497,395,380,614,000 | 42.252427 | 91 | 0.634343 | false |
DanielSlater/PyGamePlayer
|
games/mini_pong.py
|
2
|
5040
|
# Modified from http://www.pygame.org/project-Very+simple+Pong+game-816-.html
import pygame
from pygame.locals import *
bar1_score, bar2_score = 0, 0
def run(screen_width=40., screen_height=40.):
global bar1_score, bar2_score
pygame.init()
bar_width, bar_height = screen_width / 32., screen_height / 9.6
bar_dist_from_edge = screen_width / 64.
circle_diameter = screen_height / 16.
circle_radius = circle_diameter / 2.
bar_1_start_x, bar_2_start_x = bar_dist_from_edge, screen_width - bar_dist_from_edge - bar_width
bar_start_y = (screen_height - bar_height) / 2.
bar_max_y = screen_height - bar_height - bar_dist_from_edge
    circle_start_x, circle_start_y = (screen_width - circle_diameter) / 2., (screen_height - circle_diameter) / 2.
screen = pygame.display.set_mode((int(screen_width), int(screen_height)), 0, 32)
# Creating 2 bars, a ball and background.
back = pygame.Surface((int(screen_width), int(screen_height)))
background = back.convert()
background.fill((0, 0, 0))
bar = pygame.Surface((int(bar_width), int(bar_height)))
bar1 = bar.convert()
bar1.fill((255, 255, 255))
bar2 = bar.convert()
bar2.fill((255, 255, 255))
circle_surface = pygame.Surface((int(circle_diameter), int(circle_diameter)))
pygame.draw.circle(circle_surface, (255, 255, 255), (int(circle_radius), int(circle_radius)), int(circle_radius))
circle = circle_surface.convert()
circle.set_colorkey((0, 0, 0))
# some definitions
bar1_x, bar2_x = bar_1_start_x, bar_2_start_x
bar1_y, bar2_y = bar_start_y, bar_start_y
circle_x, circle_y = circle_start_x, circle_start_y
    bar1_move, bar2_move = 0., 0.
    ai_speed = 0.  # recomputed each frame below; initialised so a key press on the first frame cannot hit an undefined name
speed_x, speed_y, speed_circle = screen_width / 2.56, screen_height / 1.92, screen_width / 2.56 # 250., 250., 250.
clock = pygame.time.Clock()
done = False
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
if event.type == KEYDOWN:
if event.key == K_UP:
bar1_move = -ai_speed
elif event.key == K_DOWN:
bar1_move = ai_speed
elif event.type == KEYUP:
if event.key == K_UP:
bar1_move = 0.
elif event.key == K_DOWN:
bar1_move = 0.
screen.blit(background, (0, 0))
screen.blit(bar1, (bar1_x, bar1_y))
screen.blit(bar2, (bar2_x, bar2_y))
screen.blit(circle, (circle_x, circle_y))
bar1_y += bar1_move
# movement of circle
time_passed = clock.tick(30)
time_sec = time_passed / 1000.0
circle_x += speed_x * time_sec
circle_y += speed_y * time_sec
ai_speed = speed_circle * time_sec
# AI of the computer.
if circle_x >= (screen_width / 2.) - circle_diameter:
if not bar2_y == circle_y + circle_radius:
if bar2_y < circle_y + circle_radius:
bar2_y += ai_speed
if bar2_y > circle_y - (bar_height - circle_radius):
bar2_y -= ai_speed
else:
                bar2_y = circle_y + circle_radius  # snap the AI paddle to the ball (was a no-op comparison)
if bar1_y >= bar_max_y:
bar1_y = bar_max_y
elif bar1_y <= bar_dist_from_edge:
bar1_y = bar_dist_from_edge
if bar2_y >= bar_max_y:
bar2_y = bar_max_y
elif bar2_y <= bar_dist_from_edge:
bar2_y = bar_dist_from_edge
# since i don't know anything about collision, ball hitting bars goes like this.
if circle_x <= bar1_x + bar_dist_from_edge:
if circle_y >= bar1_y - circle_radius and circle_y <= bar1_y + (bar_height - circle_radius):
circle_x = bar_dist_from_edge + bar_width
speed_x = -speed_x
if circle_x >= bar2_x - circle_diameter:
if circle_y >= bar2_y - circle_radius and circle_y <= bar2_y + (bar_height - circle_radius):
circle_x = screen_width - bar_dist_from_edge - bar_width - circle_diameter
speed_x = -speed_x
if circle_x < -circle_radius:
bar2_score += 1
circle_x, circle_y = (screen_width + circle_diameter) / 2., circle_start_y
            bar1_y, bar2_y = bar_start_y, bar_start_y
elif circle_x > screen_width - circle_diameter:
bar1_score += 1
circle_x, circle_y = circle_start_x, circle_start_y
bar1_y, bar2_y = bar_start_y, bar_start_y
if circle_y <= bar_dist_from_edge:
speed_y = -speed_y
circle_y = bar_dist_from_edge
elif circle_y >= screen_height - circle_diameter - circle_radius:
speed_y = -speed_y
circle_y = screen_height - circle_diameter - circle_radius
pygame.display.update()
pygame.quit()
if __name__ == '__main__':
run()
|
mit
| -7,239,990,793,230,116,000 | -2,677,622,283,311,851,000 | 39.32 | 119 | 0.565476 | false |
bgribble/mfp
|
mfp/gui/xyplot/scatterplot.py
|
1
|
7839
|
#! /usr/bin/env python
'''
scatterplot.py
Specialization of XYPlot for showing sets of discrete datapoints
Copyright (c) 2012 Bill Gribble <[email protected]>
'''
import math
from .mark_style import MarkStyle
from .xyplot import XYPlot
from mfp import log
class ScatterPlot (XYPlot):
def __init__(self, element, width, height):
# data points
self.points = {}
self.points_by_tile = {}
# roll-mode scroll speeds
self.x_scroll = 0
self.y_scroll = 0
XYPlot.__init__(self, element, width, height)
def draw_field_cb(self, texture, ctxt, px_min, px_max):
def stroke_to(styler, curve, px, ptnum, delta):
points = self.points.get(curve)
dst_ptnum = ptnum + delta
if dst_ptnum < 0 or dst_ptnum > points[-1][0]:
return
dst_num, dst_pt = points[dst_ptnum]
dst_px = self.pt2px(dst_pt)
dst_px[0] -= px_min[0]
dst_px[1] -= px_min[1]
styler.stroke(ctxt, dst_px, px)
# if the viewport is animated (viewport_scroll not 0)
# the position of the field may have changed.
field_vp = self.plot.get_viewport_origin()
field_vp_pos = self.px2pt(field_vp)
field_w = self.x_max - self.x_min
field_h = self.y_max - self.y_min
if self.x_min != field_vp_pos[0]:
self.x_min = field_vp_pos[0]
self.x_max = self.x_min + field_w
self._recalc_x_scale()
if self.y_max != field_vp_pos[1]:
self.y_max = field_vp_pos[1]
self.y_min = self.y_max - field_h
self._recalc_y_scale()
for curve in self.points:
curve = int(curve)
styler = self.style.get(curve)
if styler is None:
log.warning("[scatterplot]: no style for curve", curve)
styler = self.style[curve] = MarkStyle()
tile_id = self.plot.tile_reverse.get(texture)
if tile_id is None:
return
points = self.points_by_tile[curve].get(tile_id)
if points is not None:
for ptnum, p in points:
pc = self.pt2px(p)
pc[0] -= px_min[0]
pc[1] -= px_min[1]
styler.mark(ctxt, pc)
if styler.stroke_style:
stroke_to(styler, curve, pc, ptnum, -1)
if styler.stroke_style:
ptnum, p = points[-1]
pc = self.pt2px(p)
pc[0] -= px_min[0]
pc[1] -= px_min[1]
stroke_to(styler, curve, pc, ptnum, 1)
def set_scroll_rate(self, vx, vy):
px = self.pt2px((vx, vy))
self.x_axis.set_viewport_scroll(px[0], 0)
self.y_axis.set_viewport_scroll(0, px[1])
self.plot.set_viewport_scroll(px[0], px[1])
def append(self, point, curve=0):
curve = int(curve)
pts = self.points.setdefault(curve, [])
ptnum = len(pts)
pts.append([ptnum, point])
tiles = self.index_point(point, curve, ptnum)
for tile_id in tiles:
tex = self.plot.tile_by_pos.get(tile_id)
if tex is not None:
tex.invalidate()
def index_point(self, point, curve, ptnum):
tile_size = self.plot.tile_size
def tile_id(point):
return (int(math.floor(point[0] / tile_size) * tile_size),
int(math.floor(point[1] / tile_size) * tile_size))
px = self.pt2px(point)
if px is None:
# point is not legal, usually on log charts
return []
curve = int(curve)
tiles = []
        pts = self.points.setdefault(curve, [])
bytile = self.points_by_tile.setdefault(curve, {})
style = self.style.get(curve)
if style is None:
style = self.style[curve] = MarkStyle()
markradius = style.size
for dx in [-markradius, markradius]:
for dy in [-markradius, markradius]:
x = px[0] + dx
y = px[1] + dy
tid = tile_id((x, y))
if tid not in tiles:
tiles.append(tid)
if style.stroke_style and ptnum > 0:
prev_pt = pts[ptnum - 1][1]
prev_px = self.pt2px(prev_pt)
if prev_px is not None:
tid = tile_id(prev_px)
if tid not in tiles:
tiles.append(tid)
for tile_id in tiles:
pts = bytile.setdefault(tile_id, [])
pts.append([ptnum, point])
return tiles
def reindex(self):
self.points_by_tile = {}
for curve, curvepoints in self.points.items():
for ptnum, point in curvepoints:
self.index_point(point, curve, ptnum)
def clear(self, curve=None):
if curve is None:
self.points = {}
self.points_by_tile = {}
elif curve is not None:
if curve in self.points:
del self.points[curve]
self.reindex()
self.plot.clear()
def set_style(self, style):
for inlet, istyle in style.items():
inlet = int(inlet)
marker = self.style.setdefault(inlet, MarkStyle())
for k, v in istyle.items():
if k == "size":
marker.size = float(v)
elif k == "color":
marker.set_color(v)
elif k == "shape":
marker.shape = str(v)
elif k == "stroke":
marker.stroke_style = str(v)
def save_style(self):
sd = {}
for inlet, style in self.style.items():
props = sd.setdefault(str(inlet), {})
props["size"] = style.size
props["color"] = style.colorspec
props["shape"] = style.shape
props["stroke"] = style.stroke_style
return sd
def configure(self, params):
modes = dict(LINEAR=0, LOG=1, LOG_10=1, LOG_2=2)
s = params.get("plot_style")
if s:
self.set_style(s)
need_vp = False
x = params.get("x_axis")
if x:
mode = modes.get(x.upper())
if mode != self.x_axis_mode:
self.x_axis_mode = mode
self._recalc_x_scale()
xax = self.pt2px((self.x_min, self.y_min))
self.x_axis.set_viewport_origin(xax[0], 0, True)
need_vp = True
y = params.get("y_axis")
if y:
mode = modes.get(y.upper())
if mode != self.y_axis_mode:
self.y_axis_mode = mode
self._recalc_y_scale()
yax = self.pt2px((self.x_min, self.y_max))
self.y_axis.set_viewport_origin(0, yax[1], True)
need_vp = True
if need_vp:
origin = self.pt2px((self.x_min, self.y_max))
self.set_field_origin(origin[0], origin[1], True)
def set_field_origin(self, x_orig, y_orig, redraw):
self.plot.set_viewport_origin(x_orig, y_orig, redraw)
def command(self, action, data):
if action == "add":
for c in data:
for p in data[c]:
self.append(p, c)
return True
elif action == "roll":
self.set_bounds(None, None, data, None)
self.set_scroll_rate(1.0, 0)
return True
elif action == "stop":
self.set_scroll_rate(0.0, 0.0)
return True
elif action == "reset":
self.set_bounds(None, None, data, None)
return True
return False
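# --- Illustrative usage (not part of the original module) ---
# A minimal, commented sketch of the dictionaries this class expects; the
# inlet numbers, colours and the host `element` are hypothetical, and an mfp
# XYPlot element with a running GUI is assumed.
# scatter = ScatterPlot(element, 320, 240)
# scatter.set_style({0: {"size": 4, "color": "red", "shape": "dot", "stroke": "solid"}})
# scatter.configure({"x_axis": "LINEAR", "y_axis": "LOG"})
# scatter.command("add", {0: [(0.0, 1.0), (1.0, 2.5), (2.0, 1.7)]})  # points for curve 0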
|
gpl-2.0
| -4,098,032,077,310,899,000 | -6,868,379,807,745,337,000 | 31.6625 | 71 | 0.494834 | false |
mtzirkel/skiff
|
quiz/multichoice/forms.py
|
2
|
1220
|
from django.forms import ModelForm, Textarea, NumberInput, SelectMultiple
from django import forms
from .models import MCQuestion, MCAnswer
class MCQuestionForm(ModelForm):
class Meta:
model = MCQuestion
fields = {'question_text',
'point_value',
'inquiz',
'answer_a',
'a_is_correct',
'answer_b',
'b_is_correct',
'answer_c',
'c_is_correct',
'answer_d',
                  'd_is_correct'}
widgets = {
'question_text': Textarea(attrs={'cols': 70, 'rows': 10}),
'point_value': NumberInput(attrs={'cols': 3, 'row': 1}),
'answer_a': Textarea(attrs={'cols': 70, 'rows': 2}),
'answer_b': Textarea(attrs={'cols': 70, 'rows': 2}),
'answer_c': Textarea(attrs={'cols': 70, 'rows': 2}),
'answer_d': Textarea(attrs={'cols': 70, 'rows': 2}),
}
class MCAnswerForm(ModelForm):
class Meta:
model = MCAnswer
fields = {'student_choices'}
        widgets = {'student_choices': SelectMultiple(choices='student_choices')}
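# --- Illustrative usage (not part of the original module) ---
# A minimal, commented sketch of binding MCQuestionForm in a view; the view
# name and template path are hypothetical.
# def edit_question(request, pk):
#     question = MCQuestion.objects.get(pk=pk)
#     form = MCQuestionForm(request.POST or None, instance=question)
#     if request.method == 'POST' and form.is_valid():
#         form.save()
#     return render(request, 'multichoice/edit.html', {'form': form})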
|
mit
| -5,302,869,212,898,328,000 | -7,780,997,164,554,181,000 | 32.888889 | 79 | 0.495082 | false |
ikool/metact06-djan
|
lib/Crypto/Signature/__init__.py
|
126
|
1202
|
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Digital signature protocols
A collection of standardized protocols to carry out digital signatures.
:undocumented: __revision__, __package__
"""
__all__ = [ 'PKCS1_v1_5', 'PKCS1_PSS' ]
__revision__ = "$Id$"
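# --- Illustrative usage (not part of this package's original file) ---
# A minimal, commented sketch of signing and verifying with the PKCS1_v1_5
# protocol listed above, assuming PyCrypto's RSA and SHA modules are
# available; the 1024-bit key is only for brevity.
# from Crypto.Signature import PKCS1_v1_5
# from Crypto.Hash import SHA
# from Crypto.PublicKey import RSA
# key = RSA.generate(1024)
# h = SHA.new(b"message to protect")
# signature = PKCS1_v1_5.new(key).sign(h)
# assert PKCS1_v1_5.new(key.publickey()).verify(SHA.new(b"message to protect"), signature)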
|
bsd-3-clause
| -9,096,344,193,010,320,000 | -8,400,893,089,098,710,000 | 37.774194 | 71 | 0.658902 | false |
illicitonion/givabit
|
lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/django/contrib/admin/views/decorators.py
|
45
|
3276
|
import base64
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django import http, template
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.shortcuts import render_to_response
from django.utils.translation import ugettext_lazy, ugettext as _
ERROR_MESSAGE = ugettext_lazy("Please enter a correct username and password. Note that both fields are case-sensitive.")
LOGIN_FORM_KEY = 'this_is_the_login_form'
def _display_login_form(request, error_message=''):
request.session.set_test_cookie()
return render_to_response('admin/login.html', {
'title': _('Log in'),
'app_path': request.get_full_path(),
'error_message': error_message
}, context_instance=template.RequestContext(request))
def staff_member_required(view_func):
"""
Decorator for views that checks that the user is logged in and is a staff
member, displaying the login page if necessary.
"""
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
# If this isn't already the login page, display it.
if LOGIN_FORM_KEY not in request.POST:
if request.POST:
message = _("Please log in again, because your session has expired.")
else:
message = ""
return _display_login_form(request, message)
# Check that the user accepts cookies.
if not request.session.test_cookie_worked():
message = _("Looks like your browser isn't configured to accept cookies. Please enable cookies, reload this page, and try again.")
return _display_login_form(request, message)
else:
request.session.delete_test_cookie()
# Check the password.
username = request.POST.get('username', None)
password = request.POST.get('password', None)
user = authenticate(username=username, password=password)
if user is None:
message = ERROR_MESSAGE
if '@' in username:
# Mistakenly entered e-mail address instead of username? Look it up.
users = list(User.objects.filter(email=username))
if len(users) == 1 and users[0].check_password(password):
message = _("Your e-mail address is not your username. Try '%s' instead.") % users[0].username
return _display_login_form(request, message)
# The user data is correct; log in the user in and continue.
else:
if user.is_active and user.is_staff:
login(request, user)
return http.HttpResponseRedirect(request.get_full_path())
else:
return _display_login_form(request, ERROR_MESSAGE)
return wraps(view_func)(_checklogin)
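# --- Illustrative usage (not part of the original module) ---
# A minimal, commented sketch of protecting a custom admin view with this
# decorator; the view and template names are hypothetical.
# @staff_member_required
# def import_report(request):
#     return render_to_response('admin/import_report.html', {},
#                               context_instance=template.RequestContext(request))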
|
apache-2.0
| 9,026,132,911,823,293,000 | -6,678,664,048,672,757,000 | 43.876712 | 210 | 0.654151 | false |
michael-yin/scrapy
|
scrapy/tests/test_log.py
|
10
|
4667
|
from cStringIO import StringIO
from twisted.python import log as txlog, failure
from twisted.trial import unittest
from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.settings import default_settings
class LogTest(unittest.TestCase):
def test_get_log_level(self):
default_log_level = getattr(log, default_settings.LOG_LEVEL)
self.assertEqual(log._get_log_level('WARNING'), log.WARNING)
self.assertEqual(log._get_log_level(log.WARNING), log.WARNING)
self.assertRaises(ValueError, log._get_log_level, object())
class ScrapyFileLogObserverTest(unittest.TestCase):
level = log.INFO
encoding = 'utf-8'
def setUp(self):
self.f = StringIO()
self.sflo = log.ScrapyFileLogObserver(self.f, self.level, self.encoding)
self.sflo.start()
def tearDown(self):
self.flushLoggedErrors()
self.sflo.stop()
def logged(self):
return self.f.getvalue().strip()[25:]
def first_log_line(self):
logged = self.logged()
return logged.splitlines()[0] if logged else ''
def test_msg_basic(self):
log.msg("Hello")
self.assertEqual(self.logged(), "[scrapy] INFO: Hello")
def test_msg_spider(self):
spider = BaseSpider("myspider")
log.msg("Hello", spider=spider)
self.assertEqual(self.logged(), "[myspider] INFO: Hello")
def test_msg_level1(self):
log.msg("Hello", level=log.WARNING)
self.assertEqual(self.logged(), "[scrapy] WARNING: Hello")
def test_msg_level2(self):
log.msg("Hello", log.WARNING)
self.assertEqual(self.logged(), "[scrapy] WARNING: Hello")
def test_msg_wrong_level(self):
log.msg("Hello", level=9999)
self.assertEqual(self.logged(), "[scrapy] NOLEVEL: Hello")
def test_msg_level_spider(self):
spider = BaseSpider("myspider")
log.msg("Hello", spider=spider, level=log.WARNING)
self.assertEqual(self.logged(), "[myspider] WARNING: Hello")
def test_msg_encoding(self):
log.msg(u"Price: \xa3100")
self.assertEqual(self.logged(), "[scrapy] INFO: Price: \xc2\xa3100")
def test_msg_ignore_level(self):
log.msg("Hello", level=log.DEBUG)
log.msg("World", level=log.INFO)
self.assertEqual(self.logged(), "[scrapy] INFO: World")
def test_msg_ignore_system(self):
txlog.msg("Hello")
self.failIf(self.logged())
def test_msg_ignore_system_err(self):
txlog.msg("Hello")
self.failIf(self.logged())
def test_err_noargs(self):
try:
a = 1/0
except:
log.err()
self.failUnless('Traceback' in self.logged())
self.failUnless('ZeroDivisionError' in self.logged())
def test_err_why(self):
log.err(TypeError("bad type"), "Wrong type")
self.assertEqual(self.first_log_line(), "[scrapy] ERROR: Wrong type")
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
def test_error_outside_scrapy(self):
"""Scrapy logger should still print outside errors"""
txlog.err(TypeError("bad type"), "Wrong type")
self.assertEqual(self.first_log_line(), "[-] ERROR: Wrong type")
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
# this test fails in twisted trial observer, not in scrapy observer
# def test_err_why_encoding(self):
# log.err(TypeError("bad type"), u"\xa3")
# self.assertEqual(self.first_log_line(), "[scrapy] ERROR: \xc2\xa3")
def test_err_exc(self):
log.err(TypeError("bad type"))
self.failUnless('Unhandled Error' in self.logged())
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
def test_err_failure(self):
log.err(failure.Failure(TypeError("bad type")))
self.failUnless('Unhandled Error' in self.logged())
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
class Latin1ScrapyFileLogObserverTest(ScrapyFileLogObserverTest):
encoding = 'latin-1'
def test_msg_encoding(self):
log.msg(u"Price: \xa3100")
logged = self.f.getvalue().strip()[25:]
self.assertEqual(self.logged(), "[scrapy] INFO: Price: \xa3100")
# this test fails in twisted trial observer, not in scrapy observer
# def test_err_why_encoding(self):
# log.err(TypeError("bad type"), u"\xa3")
# self.assertEqual(self.first_log_line(), "[scrapy] ERROR: \xa3")
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
| 1,839,458,463,162,321,200 | -6,539,169,652,707,282,000 | 33.065693 | 80 | 0.635312 | false |
rriggio/empower-runtime
|
empower/apps/wifimobilitymanager/wifimobilitymanager.py
|
1
|
1790
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A simple Wi-Fi mobility manager."""
from empower.managers.ranmanager.lvapp.wifiapp import EWiFiApp
from empower.core.app import EVERY
class WiFiMobilityManager(EWiFiApp):
"""A simple Wi-Fi mobility manager.
    This app will periodically hand over every LVAP in the network to the
interface with the highest RSSI.
Parameters:
service_id: the application id as an UUID (mandatory)
project_id: the project id as an UUID (mandatory)
every: the loop period in ms (optional, default 2000ms)
Example:
POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps
{
"name": "empower.apps.wifimobilitymanager.wifimobilitymanager",
"params": {
"every": 2000
}
}
"""
def loop(self):
"""Periodic job."""
for lvap in self.lvaps.values():
lvap.blocks = self.blocks().sort_by_rssi(lvap.addr).first()
def launch(context, service_id, every=EVERY):
""" Initialize the module. """
return WiFiMobilityManager(context=context,
service_id=service_id,
every=every)
|
apache-2.0
| 905,891,528,787,407,000 | 4,940,143,847,453,849,000 | 30.403509 | 75 | 0.660894 | false |
twoh/leevee
|
env/Lib/encodings/zlib_codec.py
|
58
|
3048
|
""" Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
_is_text_encoding=False,
)
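# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of round-tripping bytes through this codec, both via the
# module-level helpers and via the codecs registry (the registry spelling
# assumes the standard-library 'zlib_codec' alias is available).
if __name__ == '__main__':
    data = b'hello hello hello hello'
    compressed, consumed = zlib_encode(data)
    restored, _ = zlib_decode(compressed)
    assert restored == data and consumed == len(data)
    assert codecs.decode(codecs.encode(data, 'zlib_codec'), 'zlib_codec') == data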
|
mit
| 892,217,091,877,655,200 | -5,908,398,837,223,673,000 | 28.592233 | 68 | 0.663386 | false |
HardTacos/techmodnotify-reddit-bot
|
techmodnotify.py
|
1
|
6960
|
#!/usr/bin/env python2.7
# =============================================================================
# IMPORTS
# =============================================================================
import praw
import MySQLdb
import ConfigParser
import time
import parsedatetime.parsedatetime as pdt
import pprint
import logging
from datetime import datetime, timedelta
from requests.exceptions import HTTPError, ConnectionError, Timeout
from praw.errors import ExceptionList, APIException, InvalidCaptcha, InvalidUser, RateLimitExceeded
from socket import timeout
from pytz import timezone
from multiprocessing import Process
# =============================================================================
# GLOBALS
# =============================================================================
# Reads the config file
config = ConfigParser.ConfigParser()
config.read("techmodnotify.cfg")
# Reddit info
user_agent = ("TechModNotify bot by /u/zathegfx")
reddit = praw.Reddit(user_agent = user_agent)
USER = config.get("Reddit", "username")
PASS = config.get("Reddit", "password")
DB_HOST = config.get("SQL", "host")
DB_NAME = config.get("SQL", "db")
DB_USER = config.get("SQL", "user")
DB_PASS = config.get("SQL", "pass")
DB_TABLE = config.get("SQL", "table")
# =============================================================================
# Functions
# =============================================================================
def save_to_db(db, submissionID, permalink, author):
"""
Saves the permalink submission, the time, and the author to the DB
"""
cursor = db.cursor()
currentTime1 = datetime.now(timezone('UTC'))
currentTime = format(currentTime1, '%Y-%m-%d %H:%M:%S')
replyTime1 = currentTime1 + timedelta(0,300)
replyTime = format(replyTime1, '%Y-%m-%d %H:%M:%S')
cmd = "SELECT * FROM " + DB_TABLE + " WHERE submissionID = %s"
cursor.execute(cmd, [submissionID])
results = cursor.fetchall()
if (len(results) > 0):
return True;
else:
cmd = "INSERT INTO " + DB_TABLE + " (submissionID, currentTime, replyTime, permalink, author) VALUES (%s, %s, %s, %s, %s)"
cursor.execute(cmd, [submissionID, currentTime, replyTime, permalink, author])
print currentTime + ' - Inserted new record into table: ' + submissionID
db.commit()
def search_db():
"""
    Search the database for any records whose scheduled reply time has passed
"""
while True:
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASS, DB_NAME )
cursor = db.cursor()
currentTime1 = datetime.now(timezone('UTC'))
currentTime = format(currentTime1, '%Y-%m-%d %H:%M:%S')
cmd = "SELECT * FROM " + DB_TABLE + " WHERE replyTime < %s"
cursor.execute(cmd, [currentTime])
results = cursor.fetchall()
alreadySent = []
if ( len(results) > 0 ):
for row in results:
if row[0] not in alreadySent:
submission = reddit.get_submission(submission_id=row[1])
hasFlair = submission.link_flair_css_class
k = str(hasFlair)
if (k == "None"):
flagDelete = False
flagDelete = new_reply(row[4], row[5])
if flagDelete:
cmd = "DELETE FROM " + DB_TABLE + " WHERE id = %s"
cursor.execute(cmd, [row[0]])
db.commit()
print currentTime + ' - No flair detected - send message - deleting record - ' + row[1]
else:
cmd = "DELETE FROM " + DB_TABLE + " WHERE id = %s"
cursor.execute(cmd, [row[0]])
db.commit()
                        print currentTime + ' - Flair detected - deleting record - ' + row[1]
alreadySent.append(row[0])
time.sleep(5)
def new_reply(permalink, author):
reddit.login(USER, PASS)
try:
reddit.send_message(author, 'Message from /r/technology',
"Hello " + author + ","
"\n\nWe appreciate your contribution to /r/technology! We noticed "
"that you haven't flaired your [post](" + permalink + ") yet. In order to keep this sub "
"organized, every post is required to be flaired with respect to "
"the articles main focus. This allows the readers of /r/technology "
"to easily find the articles that most interest them. "
"\n\n If you could take a moment out of your time to properly flair "
"your post, we would gladly apprieciate it. Instruction on properly "
"flairing your post can be found [here](http://www.reddit.com/r/technology/wiki/flair). "
"\n\n Thank you!"
"\n\n Techonology Mod Team"
"\n\n_____\n\n"
"\n\n *This is a bot - if you have any questions or need to report an issue regarding "
"this bot, please [message the mods](https://www.reddit.com/message/compose?to=%2Fr%2Ftechnology) immediately*"
"\n\n**Your Post:** " + permalink + "")
print "Message Sent!"
return True
except InvalidUser as err:
print "InvalidUser", err
return True
except APIException as err:
print "APIException", err
return False
except IndexError as err:
print "IndexError", err
return False
except (HTTPError, ConnectionError, Timeout, timeout) as err:
print "HTTPError", err
return False
except RateLimitExceeded as err:
print "RateLimitExceeded", err
time.sleep(10)
def main():
reddit.login(USER, PASS)
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASS, DB_NAME )
print "start"
while True:
try:
for submission in praw.helpers.submission_stream(reddit, 'technology', limit=5, verbosity=0):
submissionID = submission.id
author = submission.author
permalink = submission.permalink
save_to_db(db, submissionID, permalink, author)
except Exception as err:
print 'There was an error in main(): '
print err
# =============================================================================
# RUNNER
# =============================================================================
if __name__ == '__main__':
Process(target=main).start()
Process(target=search_db).start()
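# --- Illustrative configuration (not part of the original script) ---
# A minimal sketch of the techmodnotify.cfg file the ConfigParser calls above
# expect; every value is a placeholder.
#
# [Reddit]
# username = some_bot_account
# password = some_password
#
# [SQL]
# host = localhost
# db = techmodnotify
# user = botuser
# pass = botpass
# table = pending_posts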
|
mit
| 6,753,448,824,223,000,000 | -4,978,185,619,096,697,000 | 37.666667 | 130 | 0.500144 | false |
ShaguptaS/moviepy
|
moviepy/video/compositing/CompositeVideoClip.py
|
1
|
4062
|
import numpy as np
from moviepy.video.VideoClip import VideoClip, ColorClip
from moviepy.audio.AudioClip import CompositeAudioClip
# CompositeVideoClip
class CompositeVideoClip(VideoClip):
"""
A VideoClip made of other videoclips displayed together. This is the
base class for most compositions.
:param size: The size (height x width) of the final clip.
:param clips: A list of videoclips. Each clip of the list will
be displayed below the clips appearing after it in the list.
For each clip:
- The attribute ``pos`` determines where the clip is placed.
See ``VideoClip.set_pos``
- The mask of the clip determines which parts are visible.
Finally, if all the clips in the list have their ``duration``
attribute set, then the duration of the composite video clip
is computed automatically
:param transparent: if False, the clips are overlaid on a surface
of the color `bg_color`. If True, the clips are overlaid on
a transparent surface, so that all pixels that are transparent
for all clips will be transparent in the composite clip. More
precisely, the mask of the composite clip is then the composite
of the masks of the different clips. Only use `transparent=True`
when you intend to use your composite clip as part of another
composite clip and you care about its transparency.
"""
def __init__(self, clips, size=None, bg_color=None, transparent=False,
ismask=False):
if size is None:
size = clips[0].size
if bg_color is None:
bg_color = 0.0 if ismask else (0, 0, 0)
VideoClip.__init__(self)
self.size = size
self.ismask = ismask
self.clips = clips
self.transparent = transparent
self.bg_color = bg_color
self.bg = ColorClip(size, col=self.bg_color).get_frame(0)
# compute duration
ends = [c.end for c in self.clips]
if not any([(e is None) for e in ends]):
self.duration = max(ends)
# compute audio
audioclips = [v.audio for v in self.clips if v.audio != None]
if len(audioclips) > 0:
self.audio = CompositeAudioClip(audioclips)
# compute mask
if transparent:
maskclips = [c.mask.set_pos(c.pos) for c in self.clips]
self.mask = CompositeVideoClip(maskclips,self.size,
transparent=False, ismask=True)
def gf(t):
""" The clips playing at time `t` are blitted over one
another. """
f = self.bg
for c in self.playing_clips(t):
f = c.blit_on(f, t)
return f
self.get_frame = gf
def playing_clips(self, t=0):
""" Returns a list of the clips in the composite clips that are
actually playing at the given time `t`. """
return [c for c in self.clips if c.is_playing(t)]
def clips_array(array, rows_widths=None, cols_widths=None,
transparent = True, bg_color = (0,0,0)):
array = np.array(array)
sizes_array = np.vectorize(lambda c:c.size)(array)
if rows_widths == None:
rows_widths = sizes_array.max(axis=0)
if cols_widths == None:
cols_widths = sizes_array.max(axis=1)
    xx = np.cumsum([0] + list(cols_widths))
    yy = np.cumsum([0] + list(rows_widths))
    for j, (x, cw) in enumerate(zip(xx, cols_widths)):
        for i, (y, rw) in enumerate(zip(yy, rows_widths)):
            clip = array[i, j]
            w, h = clip.size
            if (w < cw) or (h < rw):
                clip = CompositeVideoClip([clip], size=(cw, rw),
                                          transparent=True, bg_color=(0, 0, 0))
            array[i, j] = clip.set_pos((x, y))
return CompositeVideoClip(array.flatten(),
size = (xx[-1],yy[-1]))
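# --- Illustrative usage (not part of the original module) ---
# A minimal, commented sketch of compositing two clips; the file names are
# hypothetical and the import paths assume this era of moviepy's layout.
# from moviepy.video.io.VideoFileClip import VideoFileClip
# from moviepy.video.VideoClip import ImageClip
# bg = VideoFileClip("background.mp4")
# logo = ImageClip("logo.png").set_duration(bg.duration).set_pos((10, 10))
# final = CompositeVideoClip([bg, logo], size=bg.size)  # later clips are drawn on top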
|
mit
| 3,967,987,274,014,277,600 | -3,311,567,000,389,422,000 | 34.321739 | 74 | 0.575332 | false |
eusi/MissionPlanerHM
|
Lib/site-packages/scipy/signal/setup.py
|
51
|
1049
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('signal', parent_package, top_path)
config.add_data_dir('tests')
config.add_extension('sigtools',
sources=['sigtoolsmodule.c', 'sigtools.c',
'firfilter.c','medianfilter.c',
'lfilter.c.src', 'correlate_nd.c.src'],
depends = ['sigtools.h'],
include_dirs=['.']
)
config.add_extension('spectral', sources=['spectral.c'])
config.add_extension('spline',
sources = ['splinemodule.c','S_bspline_util.c','D_bspline_util.c',
'C_bspline_util.c','Z_bspline_util.c','bspline_util.c'],
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
gpl-3.0
| -3,923,107,751,466,862,000 | 5,280,501,599,114,684,000 | 35.172414 | 100 | 0.57388 | false |
yfried/ansible
|
lib/ansible/modules/cloud/rackspace/rax_files_objects.py
|
102
|
18489
|
#!/usr/bin/python
# (c) 2013, Paul Durivage <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
type: bool
default: 'no'
container:
description:
- The container to use for file object operations.
required: true
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
type: bool
default: 'yes'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment:
- rackspace
- rackspace.openstack
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects:
container: testcont
dest: ~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects:
container: testcont
src: file1
dest: ~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects:
container: testcont
src: file1,file2,file3
dest: ~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects:
container: testcont
method: delete
dest: file1
- name: "Delete several objects in test container"
rax_files_objects:
container: testcont
method: delete
dest: file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects:
container: testcont
method: delete
- name: "Upload all files to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: [email protected]
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file3
expires: 60
- name: "Attempt to get remote object that does not exist"
rax_files_objects:
container: testcont
method: get
src: FileThatDoesNotExist.jpg
dest: ~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects:
container: testcont
method: delete
dest: FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects:
container: testcont
type: meta
dest: file2
- name: "Get metadata on several objects"
rax_files_objects:
container: testcont
type: meta
src: file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects:
container: testcont
type: meta
src: file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects:
container: testcont
type: meta
'''
import os
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _upload_folder(cf, folder, container, ttl=None, headers=None):
""" Uploads a folder to Cloud Files.
"""
total_bytes = 0
for root, dirs, files in os.walk(folder):
for fname in files:
full_path = os.path.join(root, fname)
obj_name = os.path.relpath(full_path, folder)
obj_size = os.path.getsize(full_path)
cf.upload_file(container, full_path,
obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
total_bytes += obj_size
return total_bytes
def upload(module, cf, container, src, dest, meta, expires):
""" Uploads a single object or a folder to Cloud Files Optionally sets an
metadata, TTL value (expires), or Content-Disposition and Content-Encoding
headers.
"""
if not src:
module.fail_json(msg='src must be specified when uploading')
c = _get_container(module, cf, container)
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
total_bytes = 0
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
elif is_dir:
try:
total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
try:
cont_obj = c.upload_file(src, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or total_bytes > 0:
EXIT_DICT['changed'] = True
if meta:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
""" Download objects from Cloud Files to a local path specified by "dest".
    Optionally disable maintaining a directory structure by passing a
false value to "structure".
"""
# Looking for an explicit destination
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
# Attempt to fetch the container by name
c = _get_container(module, cf, container)
# Accept a single object name or a comma-separated list of objs
# If not specified, get the entire container
if src:
objs = src.split(',')
        objs = list(map(str.strip, objs))
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
""" Delete specific objects by proving a single file name or a
comma-separated list to src OR dest (but not both). Omitting file name(s)
assumes the entire container is to be deleted.
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
        objs = list(map(str.strip, objs))
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
""" Get metadata for a single file, comma-separated list, or entire
container
"""
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception as e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
""" Set metadata on a container, single file, or comma-separated list.
Passing a true value to clear_meta clears the metadata stored in Cloud
Files before setting the new metadata to the value of "meta".
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
    c = _get_container(module, cf, container)
    if objs:
        objs = objs.split(',')
        objs = map(str.strip, objs)
    else:
        objs = c.get_object_names()
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
        EXIT_DICT['num_changed'] = len(results)
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
""" Removes metadata keys and values specified in meta, if any. Deletes on
all objects specified by src or dest (but not both), if any; otherwise it
deletes keys on all objects in the container
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
    c = _get_container(module, cf, container)
    if objs:
        objs = objs.split(',')
        objs = map(str.strip, objs)
    else:
        objs = c.get_object_names()
results = [] # Num of metadata keys removed, not objects affected
for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject as e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
if __name__ == '__main__':
main()
|
gpl-3.0
| -1,316,629,408,569,823,000 | 605,995,842,769,830,000 | 29.260229 | 99 | 0.597761 | false |
cledio66/pyglet
|
contrib/layout/layout/Plex/Actions.py
|
32
|
2348
|
#=======================================================================
#
# Python Lexical Analyser
#
# Actions for use in token specifications
#
#=======================================================================
class Action:
def same_as(self, other):
return self is other
class Return(Action):
"""
Internal Plex action which causes |value| to
be returned as the value of the associated token
"""
value = None
def __init__(self, value):
self.value = value
def perform(self, token_stream, text):
return self.value
def same_as(self, other):
return isinstance(other, Return) and self.value == other.value
def __repr__(self):
return "Return(%s)" % repr(self.value)
class Call(Action):
"""
Internal Plex action which causes a function to be called.
"""
function = None
def __init__(self, function):
self.function = function
def perform(self, token_stream, text):
return self.function(token_stream, text)
def __repr__(self):
return "Call(%s)" % self.function.__name__
def same_as(self, other):
return isinstance(other, Call) and self.function is other.function
class Begin(Action):
"""
Begin(state_name) is a Plex action which causes the Scanner to
enter the state |state_name|. See the docstring of Plex.Lexicon
for more information.
"""
state_name = None
def __init__(self, state_name):
self.state_name = state_name
def perform(self, token_stream, text):
token_stream.begin(self.state_name)
def __repr__(self):
return "Begin(%s)" % self.state_name
def same_as(self, other):
return isinstance(other, Begin) and self.state_name == other.state_name
class Ignore(Action):
"""
IGNORE is a Plex action which causes its associated token
to be ignored. See the docstring of Plex.Lexicon for more
information.
"""
def perform(self, token_stream, text):
return None
def __repr__(self):
return "IGNORE"
IGNORE = Ignore()
IGNORE.__doc__ = Ignore.__doc__
class Text(Action):
"""
TEXT is a Plex action which causes the text of a token to
be returned as the value of the token. See the docstring of
Plex.Lexicon for more information.
"""
def perform(self, token_stream, text):
return text
def __repr__(self):
return "TEXT"
TEXT = Text()
TEXT.__doc__ = Text.__doc__
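# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how these actions are typically paired with patterns in
# a Plex Lexicon-style token specification; the Lexicon/Str/Any/Rep1 names are
# assumed to come from the surrounding Plex package and are shown only for
# illustration.
#
#     from Plex import Lexicon, Str, Any, Rep1
#     lexicon = Lexicon([
#         (Rep1(Any("0123456789")), TEXT),    # return the matched text
#         (Str(" "), IGNORE),                 # silently drop whitespace
#         (Str("begin"), Begin('block')),     # switch the scanner state
#     ])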
|
bsd-3-clause
| 8,822,719,194,796,159,000 | 7,008,066,720,242,127,000 | 20.541284 | 75 | 0.620528 | false |
rphlypo/parietalretreat
|
setup_data_path_salma.py
|
1
|
6001
|
import glob
import os.path
from pandas import DataFrame
import pandas
def get_all_paths(data_set=None, root_dir="/"):
# TODO
# if data_set ... collections.Sequence
# iterate over list
if data_set is None:
data_set = {"hcp", "henson2010faces", "ds105", "ds107"}
list_ = list()
head, tail_ = os.path.split(root_dir)
counter = 0
while tail_:
head, tail_ = os.path.split(head)
counter += 1
if hasattr(data_set, "__iter__"):
df_ = list()
for ds in data_set:
df_.append(get_all_paths(data_set=ds, root_dir=root_dir))
df = pandas.concat(df_, keys=data_set)
elif data_set.startswith("ds") or data_set == "henson2010faces":
base_path = os.path.join(root_dir,
"storage/workspace/brainpedia/preproc/",
data_set)
with open(os.path.join(base_path, "scan_key.txt")) as file_:
TR = file_.readline()[3:-1]
for fun_path in glob.iglob(os.path.join(base_path,
"sub*/model/model*/"
"BOLD/task*/bold.nii.gz")):
head, tail_ = os.path.split(fun_path)
tail = [tail_]
while tail_:
head, tail_ = os.path.split(head)
tail.append(tail_)
tail.reverse()
subj_id = tail[6 + counter][-3:]
model = tail[8 + counter][-3:]
task, run = tail[10 + counter].split("_")
tmp_base = os.path.split(os.path.split(fun_path)[0])[0]
anat = os.path.join(tmp_base,
"anatomy",
"highres{}.nii.gz".format(model[-3:]))
onsets = glob.glob(os.path.join(tmp_base, "onsets",
"task{}_run{}".format(task, run),
"cond*.txt"))
confds = os.path.join(os.path.split(fun_path)[0], "motion.txt")
list_.append({"subj_id": subj_id,
"model": model,
"task": task[-3:],
"run": run[-3:],
"func": fun_path,
"anat": anat,
"confds": confds,
"TR": TR})
if onsets:
list_[-1]["onsets"] = onsets
df = DataFrame(list_)
elif data_set == "hcp":
base_path = os.path.join(root_dir, "storage/data/HCP/Q2/")
for fun_path in glob.iglob(os.path.join(base_path,
"*/MNINonLinear/Results/",
"*/*.nii.gz")):
head, tail = os.path.split(fun_path)
if head[-2:] not in ["LR", "RL"]:
continue
tail = [tail]
while head != "/":
head, t = os.path.split(head)
tail.append(t)
if tail[0][:-7] != tail[1]:
continue
tail.reverse()
subj_id = tail[4 + counter]
task = tail[7 + counter][6:-3]
if tail[7 + counter].startswith("rfMRI"):
run = task[-1]
task = task[:-1]
mode = tail[7 + counter][-2:]
anat = os.path.join(base_path, subj_id, "MNINonLinear/T1w.nii.gz")
confds = os.path.join(os.path.split(fun_path)[0],
"Movement_Regressors.txt")
list_.append({"subj_id": subj_id,
"task": task,
"mode": mode,
"func": fun_path,
"anat": anat,
"confds": confds,
"TR": 0.72})
if tail[8 + counter].startswith("rfMRI"):
list_[-1]["run"] = run
else:
onsets = [onset
for onset in glob.glob(os.path.join(
os.path.split(fun_path)[0], "EVs/*.txt"))
if os.path.split(onset)[1][0] != "S"]
list_[-1]["onsets"] = onsets
df = DataFrame(list_)
return df
if __name__ == "__main__":
from nilearn.input_data import MultiNiftiMasker, NiftiMapsMasker
from joblib import Memory, Parallel, delayed
import joblib
from sklearn.base import clone
import nibabel
root_dir = "/media/Elements/volatile/new/salma"
mem = Memory(cachedir=os.path.join(root_dir,
("storage/workspace/brainpedia"
"/preproc/henson2010faces/dump/")))
print "Loading all paths and variables into memory"
df = get_all_paths(root_dir=root_dir,
data_set=["henson2010faces"])
target_affine_ = nibabel.load(df["func"][0]).get_affine()
target_shape_ = nibabel.load(df["func"][0]).shape[:-1]
print "preparing and running MultiNiftiMasker"
mnm = MultiNiftiMasker(mask_strategy="epi", memory=mem, n_jobs=1,
verbose=10, target_affine=target_affine_,
target_shape=target_shape_)
mask_img = mnm.fit(list(df["func"])).mask_img_
print "preparing and running NiftiMapsMasker"
nmm = NiftiMapsMasker(
maps_img=os.path.join("/usr/share/fsl/data/atlases/HarvardOxford/",
"HarvardOxford-cortl-prob-2mm.nii.gz"),
mask_img=mask_img, detrend=True, smoothing_fwhm=5, standardize=True,
low_pass=None, high_pass=None, memory=mem, verbose=10)
region_ts = [clone(nmm).fit_transform(niimg, n_hv_confounds=5)
for niimg in list(df["func"])]
joblib.dump(region_ts, "/home/storage/workspace/rphlypo/retreat/results/")
region_signals = DataFrame({"region_signals": region_ts}, index=df.index)
df.join(region_signals)
|
bsd-2-clause
| -41,176,170,529,656,456 | -4,337,496,976,772,303,400 | 40.386207 | 78 | 0.472421 | false |
sudheesh001/oh-mainline
|
vendor/packages/twisted/twisted/protocols/pcp.py
|
71
|
7090
|
# -*- test-case-name: twisted.test.test_pcp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Producer-Consumer Proxy.
"""
from zope.interface import implements
from twisted.internet import interfaces
class BasicProducerConsumerProxy:
"""
I can act as a man in the middle between any Producer and Consumer.
@ivar producer: the Producer I subscribe to.
@type producer: L{IProducer<interfaces.IProducer>}
@ivar consumer: the Consumer I publish to.
@type consumer: L{IConsumer<interfaces.IConsumer>}
@ivar paused: As a Producer, am I paused?
@type paused: bool
"""
implements(interfaces.IProducer, interfaces.IConsumer)
consumer = None
producer = None
producerIsStreaming = None
iAmStreaming = True
outstandingPull = False
paused = False
stopped = False
def __init__(self, consumer):
self._buffer = []
if consumer is not None:
self.consumer = consumer
consumer.registerProducer(self, self.iAmStreaming)
# Producer methods:
def pauseProducing(self):
self.paused = True
if self.producer:
self.producer.pauseProducing()
def resumeProducing(self):
self.paused = False
if self._buffer:
# TODO: Check to see if consumer supports writeSeq.
self.consumer.write(''.join(self._buffer))
self._buffer[:] = []
else:
if not self.iAmStreaming:
self.outstandingPull = True
if self.producer is not None:
self.producer.resumeProducing()
def stopProducing(self):
if self.producer is not None:
self.producer.stopProducing()
if self.consumer is not None:
del self.consumer
# Consumer methods:
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
self.consumer.write(data)
self.outstandingPull = False
def finish(self):
if self.consumer is not None:
self.consumer.finish()
self.unregisterProducer()
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerIsStreaming = streaming
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
if self.consumer:
self.consumer.unregisterProducer()
def __repr__(self):
return '<%s@%x around %s>' % (self.__class__, id(self), self.consumer)
class ProducerConsumerProxy(BasicProducerConsumerProxy):
"""ProducerConsumerProxy with a finite buffer.
When my buffer fills up, I have my parent Producer pause until my buffer
has room in it again.
"""
# Copies much from abstract.FileDescriptor
bufferSize = 2**2**2**2
producerPaused = False
unregistered = False
def pauseProducing(self):
# Does *not* call up to ProducerConsumerProxy to relay the pause
# message through to my parent Producer.
self.paused = True
def resumeProducing(self):
self.paused = False
if self._buffer:
data = ''.join(self._buffer)
bytesSent = self._writeSomeData(data)
if bytesSent < len(data):
unsent = data[bytesSent:]
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer[:] = [unsent]
else:
self._buffer[:] = []
else:
bytesSent = 0
if (self.unregistered and bytesSent and not self._buffer and
self.consumer is not None):
self.consumer.unregisterProducer()
if not self.iAmStreaming:
self.outstandingPull = not bytesSent
if self.producer is not None:
bytesBuffered = sum([len(s) for s in self._buffer])
# TODO: You can see here the potential for high and low
# watermarks, where bufferSize would be the high mark when we
# ask the upstream producer to pause, and we wouldn't have
# it resume again until it hit the low mark. Or if producer
# is Pull, maybe we'd like to pull from it as much as necessary
# to keep our buffer full to the low mark, so we're never caught
# without something to send.
if self.producerPaused and (bytesBuffered < self.bufferSize):
# Now that our buffer is empty,
self.producerPaused = False
self.producer.resumeProducing()
elif self.outstandingPull:
# I did not have any data to write in response to a pull,
# so I'd better pull some myself.
self.producer.resumeProducing()
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
assert not self._buffer, (
"Writing fresh data to consumer before my buffer is empty!")
# I'm going to use _writeSomeData here so that there is only one
# path to self.consumer.write. But it doesn't actually make sense,
# if I am streaming, for some data to not be all data. But maybe I
# am not streaming, but I am writing here anyway, because there was
# an earlier request for data which was not answered.
bytesSent = self._writeSomeData(data)
self.outstandingPull = False
if not bytesSent == len(data):
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer.append(data[bytesSent:])
if (self.producer is not None) and self.producerIsStreaming:
bytesBuffered = sum([len(s) for s in self._buffer])
if bytesBuffered >= self.bufferSize:
self.producer.pauseProducing()
self.producerPaused = True
def registerProducer(self, producer, streaming):
self.unregistered = False
BasicProducerConsumerProxy.registerProducer(self, producer, streaming)
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
self.unregistered = True
if self.consumer and not self._buffer:
self.consumer.unregisterProducer()
def _writeSomeData(self, data):
"""Write as much of this data as possible.
@returns: The number of bytes written.
"""
if self.consumer is None:
return 0
self.consumer.write(data)
return len(data)
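# --- Illustrative sketch (not part of the original module) ---
# A hedged example of wiring a proxy between a producer and a consumer;
# someConsumer and someProducer are hypothetical IConsumer/IProducer
# implementations, not real Twisted objects.
#
#     proxy = ProducerConsumerProxy(someConsumer)      # registers with consumer
#     proxy.registerProducer(someProducer, streaming=True)
#     proxy.write("data from the producer")            # buffered or forwarded
#     proxy.unregisterProducer()                       # flush, then unregister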
|
agpl-3.0
| 7,405,788,284,309,675,000 | 6,463,800,326,080,281,000 | 33.754902 | 79 | 0.609027 | false |
hpcugent/easybuild-framework
|
easybuild/toolchains/gmpolf.py
|
2
|
1847
|
##
# Copyright 2013-2019 Ghent University
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for gmpolf compiler toolchain (includes GCC, MPICH2, OpenBLAS, LAPACK, ScaLAPACK and FFTW).
:author: Dmitri Gribenko (National Technical University of Ukraine "KPI") (copy from...)
:author: Bart Verleye (University of Auckland)
"""
from easybuild.toolchains.gmpich import Gmpich
from easybuild.toolchains.golf import Golf
from easybuild.toolchains.fft.fftw import Fftw
from easybuild.toolchains.linalg.openblas import OpenBLAS
from easybuild.toolchains.linalg.scalapack import ScaLAPACK
class Gmpolf(Gmpich, OpenBLAS, ScaLAPACK, Fftw):
"""Compiler toolchain with GCC, MPICH, OpenBLAS, ScaLAPACK and FFTW."""
NAME = 'gmpolf'
SUBTOOLCHAIN = [Gmpich.NAME, Golf.NAME]
|
gpl-2.0
| 5,136,463,900,206,706,000 | 2,069,974,520,983,750,400 | 40.044444 | 109 | 0.760152 | false |
cuongnv23/ansible
|
lib/ansible/modules/database/postgresql/postgresql_schema.py
|
29
|
8233
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema from a remote host
description:
- Add or remove PostgreSQL schema from a remote host.
version_added: "2.3"
options:
name:
description:
- Name of the schema to add or remove.
required: true
default: null
database:
description:
- Name of the database to connect to.
required: false
default: postgres
login_user:
description:
- The username used to authenticate with.
required: false
default: null
login_password:
description:
- The password used to authenticate with.
required: false
default: null
login_host:
description:
- Host running the database.
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
required: false
default: null
owner:
description:
- Name of the role to set as owner of the schema.
required: false
default: null
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The schema state.
required: false
default: present
choices: [ "present", "absent" ]
notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before
using this module.
requirements: [ psycopg2 ]
author: "Flavien Chantelot <[email protected]>"
'''
EXAMPLES = '''
# Create a new schema with name "acme"
- postgresql_schema:
name: acme
# Create a new schema "acme" with a user "bob" who will own it
- postgresql_schema:
name: acme
owner: bob
'''
RETURN = '''
schema:
description: Name of the schema
returned: success, changed
type: string
sample: "acme"
'''
import traceback
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
query = "ALTER SCHEMA %s OWNER TO %s" % (
pg_quote_identifier(schema, 'schema'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query)
return True
def get_schema_info(cursor, schema):
query = """
SELECT schema_owner AS owner
FROM information_schema.schemata
WHERE schema_name = %(schema)s
"""
cursor.execute(query, {'schema': schema})
return cursor.fetchone()
def schema_exists(cursor, schema):
query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s"
cursor.execute(query, {'schema': schema})
return cursor.rowcount == 1
def schema_delete(cursor, schema):
if schema_exists(cursor, schema):
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
cursor.execute(query)
return True
else:
return False
def schema_create(cursor, schema, owner):
if not schema_exists(cursor, schema):
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
if owner:
query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role'))
query = ' '.join(query_fragments)
cursor.execute(query)
return True
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return set_owner(cursor, schema, owner)
else:
return False
def schema_matches(cursor, schema, owner):
if not schema_exists(cursor, schema):
return False
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return False
else:
return True
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
port=dict(default="5432"),
schema=dict(required=True, aliases=['name']),
owner=dict(default=""),
database=dict(default="postgres"),
state=dict(default="present", choices=["absent", "present"]),
),
supports_check_mode = True
)
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
schema = module.params["schema"]
owner = module.params["owner"]
state = module.params["state"]
database = module.params["database"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.items()
if k in params_map and v != '' )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(database=database, **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2
.extensions
.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
try:
if module.check_mode:
if state == "absent":
changed = not schema_exists(cursor, schema)
elif state == "present":
changed = not schema_matches(cursor, schema, owner)
module.exit_json(changed=changed, schema=schema)
if state == "absent":
try:
changed = schema_delete(cursor, schema)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "present":
try:
changed = schema_create(cursor, schema, owner)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception as e:
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, schema=schema)
if __name__ == '__main__':
main()
|
gpl-3.0
| -6,969,093,719,985,360,000 | 2,013,524,274,737,959,700 | 29.951128 | 151 | 0.619215 | false |
yatinkumbhare/openstack-nova
|
nova/api/openstack/compute/plugins/v3/block_device_mapping_v1.py
|
37
|
2595
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The legacy block device mappings extension."""
from oslo_utils import strutils
from webob import exc
from nova.api.openstack.compute.schemas.v3 import block_device_mapping_v1 as \
schema_block_device_mapping
from nova.api.openstack import extensions
from nova.i18n import _
ALIAS = "os-block-device-mapping-v1"
ATTRIBUTE_NAME = "block_device_mapping"
ATTRIBUTE_NAME_V2 = "block_device_mapping_v2"
class BlockDeviceMappingV1(extensions.V3APIExtensionBase):
"""Block device mapping boot support."""
name = "BlockDeviceMappingV1"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
return []
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
block_device_mapping = server_dict.get(ATTRIBUTE_NAME, [])
block_device_mapping_v2 = server_dict.get(ATTRIBUTE_NAME_V2, [])
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
for bdm in block_device_mapping:
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if block_device_mapping:
create_kwargs['block_device_mapping'] = block_device_mapping
# Sets the legacy_bdm flag if we got a legacy block device mapping.
create_kwargs['legacy_bdm'] = True
def get_server_create_schema(self):
return schema_block_device_mapping.server_create
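# --- Illustrative sketch (not part of the original module) ---
# Hypothetical input/output for server_create(), shown only to clarify the
# data flow; the volume id and device name below are placeholders.
#
#     server_dict = {"block_device_mapping": [
#         {"volume_id": "vol-123", "device_name": "/dev/vda",
#          "delete_on_termination": "true"}]}
#     create_kwargs = {}
#     # after server_create(server_dict, create_kwargs, None) runs,
#     # create_kwargs holds the mapping with a boolean delete_on_termination
#     # plus legacy_bdm=True.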
|
apache-2.0
| -7,855,282,545,022,798,000 | -4,437,632,113,491,365,000 | 37.161765 | 79 | 0.690944 | false |
mysociety/yournextmp-popit
|
candidates/models/address.py
|
2
|
3191
|
from __future__ import unicode_literals
from collections import defaultdict
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.six.moves.urllib_parse import urljoin
from django.utils.text import slugify
from django.utils.translation import ugettext as _
from pygeocoder import Geocoder, GeocoderError
import requests
from elections.models import Election
from candidates.election_specific import get_local_area_id
# We use this both for validation of address and the results of the
# lookup, so the MapIt and geocoder lookups should be cached so we
# don't make double requests:
def check_address(address_string, country=None):
tidied_address_before_country = address_string.strip()
if country is None:
tidied_address = tidied_address_before_country
else:
tidied_address = tidied_address_before_country + ', ' + country
try:
location_results = Geocoder.geocode(tidied_address)
except GeocoderError:
message = _("Failed to find a location for '{0}'")
raise ValidationError(message.format(tidied_address_before_country))
lat, lon = location_results[0].coordinates
all_mapit_json = []
queries_to_try = defaultdict(set)
for election in Election.objects.current().prefetch_related('area_types'):
area_types = election.area_types.values_list('name', flat=True)
queries_to_try[election.area_generation].update(area_types)
for area_generation, area_types in queries_to_try.items():
mapit_lookup_url = urljoin(settings.MAPIT_BASE_URL,
'point/4326/{lon},{lat}'.format(
lon=lon,
lat=lat,
))
mapit_lookup_url += '?type=' + ','.join(area_types)
        mapit_lookup_url += '&generation={0}'.format(area_generation)
mapit_result = requests.get(mapit_lookup_url)
mapit_json = mapit_result.json()
if 'error' in mapit_json:
message = _("The area lookup returned an error: '{error}'")
raise ValidationError(message.format(error=mapit_json['error']))
all_mapit_json += mapit_json.items()
sorted_mapit_results = sorted(
all_mapit_json,
key=lambda t: (t[1]['type'], int(t[0]))
)
if not sorted_mapit_results:
message = _("The address '{0}' appears to be outside the area this site knows about")
raise ValidationError(message.format(tidied_address_before_country))
types_and_areas = [
{
'area_type_code': a[1]['type'],
'area_id': get_local_area_id(a),
}
for a in sorted_mapit_results
]
if settings.AREAS_TO_ALWAYS_RETURN:
types_and_areas += settings.AREAS_TO_ALWAYS_RETURN
types_and_areas_joined = ','.join(
'{area_type_code}-{area_id}'.format(**ta) for ta in types_and_areas
)
area_slugs = [slugify(a[1]['name']) for a in sorted_mapit_results]
ignored_slug = '-'.join(area_slugs)
return {
'type_and_area_ids': types_and_areas_joined,
'ignored_slug': ignored_slug,
}
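# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the expected result shape; the address and the returned
# ids/slug are placeholders, and the real values depend on MAPIT_BASE_URL and
# the current elections.
#
#     result = check_address("10 Downing Street, London", country="UK")
#     # result == {'type_and_area_ids': 'WMC-65759',
#     #            'ignored_slug': 'cities-of-london-and-westminster'}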
|
agpl-3.0
| 4,991,474,611,238,552,000 | 6,762,463,786,603,580,000 | 40.441558 | 93 | 0.638045 | false |
CSysTeam/SecurityPackage
|
RC4/RC4Test.py
|
1
|
1105
|
import unittest
from RC4 import RC4
class RC4Test(unittest.TestCase):
def test_RC4TestEnc1(self):
algorithm = RC4()
cipher = algorithm.Encrypt("abcd", "test")
self.assertEqual(cipher, "ÏíDu")
def test_RC4TestDec1(self):
algorithm = RC4()
cipher = algorithm.Decrypt("ÏíDu", "test")
self.assertEqual(cipher, "abcd")
def test_RC4TestEnc2(self):
algorithm = RC4()
cipher = algorithm.Encrypt("0x61626364", "0x74657374")
self.assertTrue(cipher, "0xcfed4475")
def test_RC4TestDec2(self):
algorithm = RC4()
cipher = algorithm.Encrypt("0xcfed4475", "0x74657374")
self.assertTrue(cipher, "0x61626364")
def test_RC4TestEnc(self):
algorithm = RC4()
cipher = algorithm.Encrypt("aaaa", "test")
self.assertEqual(cipher, "ÏîFp")
def test_RC4TestDec(self):
algorithm = RC4()
cipher = algorithm.Decrypt("ÏîFp", "test")
self.assertEqual(cipher, "aaaa")
|
gpl-3.0
| -2,064,729,513,891,306,500 | -8,530,605,706,936,687,000 | 20.096154 | 62 | 0.568824 | false |
sivu22/nltk-on-gae
|
GAE/nltk/corpus/util.py
|
5
|
2934
|
# Natural Language Toolkit: Corpus Reader Utility Functions
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
######################################################################
#{ Lazy Corpus Loader
######################################################################
import re
import nltk
TRY_ZIPFILE_FIRST = False
class LazyCorpusLoader(object):
"""
A proxy object which is used to stand in for a corpus object
before the corpus is loaded. This allows NLTK to create an object
for each corpus, but defer the costs associated with loading those
corpora until the first time that they're actually accessed.
The first time this object is accessed in any way, it will load
the corresponding corpus, and transform itself into that corpus
(by modifying its own ``__class__`` and ``__dict__`` attributes).
If the corpus can not be found, then accessing this object will
raise an exception, displaying installation instructions for the
NLTK data package. Once they've properly installed the data
package (or modified ``nltk.data.path`` to point to its location),
they can then use the corpus object without restarting python.
"""
def __init__(self, name, reader_cls, *args, **kwargs):
from nltk.corpus.reader.api import CorpusReader
assert issubclass(reader_cls, CorpusReader)
self.__name = self.__name__ = name
self.__reader_cls = reader_cls
self.__args = args
self.__kwargs = kwargs
def __load(self):
# Find the corpus root directory.
zip_name = re.sub(r'(([^/]*)(/.*)?)', r'\2.zip/\1/', self.__name)
if TRY_ZIPFILE_FIRST:
try:
root = nltk.data.find('corpora/%s' % zip_name)
except LookupError:
raise
root = nltk.data.find('corpora/%s' % self.__name)
else:
try:
root = nltk.data.find('corpora/%s' % self.__name)
except LookupError as e:
try: root = nltk.data.find('corpora/%s' % zip_name)
except LookupError: raise e
# Load the corpus.
corpus = self.__reader_cls(root, *self.__args, **self.__kwargs)
# This is where the magic happens! Transform ourselves into
# the corpus by modifying our own __dict__ and __class__ to
# match that of the corpus.
self.__dict__ = corpus.__dict__
self.__class__ = corpus.__class__
def __getattr__(self, attr):
self.__load()
# This looks circular, but its not, since __load() changes our
# __class__ to something new:
return getattr(self, attr)
def __repr__(self):
return '<%s in %r (not loaded yet)>' % (
self.__reader_cls.__name__, '.../corpora/'+self.__name)
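# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how nltk.corpus typically declares a corpus lazily; the
# reader class and file-id pattern are illustrative, not the exact real
# declaration.
#
#     from nltk.corpus.reader import PlaintextCorpusReader
#     gutenberg = LazyCorpusLoader('gutenberg', PlaintextCorpusReader,
#                                  r'(?!\.).*\.txt')
#     # Nothing is located or loaded yet; the first attribute access
#     # (e.g. gutenberg.fileids()) triggers __load() and swaps the class.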
|
apache-2.0
| -1,648,188,837,894,738,200 | -7,896,327,495,109,561,000 | 38.12 | 73 | 0.580436 | false |
benjaminjkraft/django
|
tests/template_tests/test_engine.py
|
116
|
1925
|
import os
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase
from .utils import ROOT, TEMPLATE_DIR
OTHER_DIR = os.path.join(ROOT, 'other_templates')
class RenderToStringTest(SimpleTestCase):
def setUp(self):
self.engine = Engine(dirs=[TEMPLATE_DIR])
def test_basic_context(self):
self.assertEqual(
self.engine.render_to_string('test_context.html', {'obj': 'test'}),
'obj:test\n',
)
class LoaderTests(SimpleTestCase):
def test_origin(self):
engine = Engine(dirs=[TEMPLATE_DIR], debug=True)
template = engine.get_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
def test_loader_priority(self):
"""
#21460 -- Check that the order of template loader works.
"""
loaders = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
def test_cached_loader_priority(self):
"""
Check that the order of template loader works. Refs #21460.
"""
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
|
bsd-3-clause
| 1,761,577,113,090,223,400 | 6,231,529,476,382,697,000 | 31.627119 | 79 | 0.631688 | false |
ekalosak/boto
|
boto/swf/__init__.py
|
145
|
1792
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.ec2.regioninfo import RegionInfo
from boto.regioninfo import get_regions, load_regions
import boto.swf.layer1
REGION_ENDPOINTS = load_regions().get('swf', {})
def regions(**kw_params):
"""
Get all available regions for the Amazon Simple Workflow service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
return get_regions('swf', connection_cls=boto.swf.layer1.Layer1)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
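# --- Illustrative sketch (not part of the original module) ---
# Hypothetical usage; the region name and credentials are placeholders, and
# the call returns None when the region name is unknown.
#
#     conn = connect_to_region('us-east-1',
#                              aws_access_key_id='...',
#                              aws_secret_access_key='...')
#     if conn is None:
#         raise ValueError('unknown SWF region')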
|
mit
| 4,876,830,077,654,313,000 | -4,708,689,316,624,654,000 | 37.956522 | 74 | 0.739955 | false |
MQQiang/kbengine
|
kbe/src/lib/python/Lib/ctypes/wintypes.py
|
197
|
5628
|
# The most useful windows datatypes
import ctypes
BYTE = ctypes.c_byte
WORD = ctypes.c_ushort
DWORD = ctypes.c_ulong
#UCHAR = ctypes.c_uchar
CHAR = ctypes.c_char
WCHAR = ctypes.c_wchar
UINT = ctypes.c_uint
INT = ctypes.c_int
DOUBLE = ctypes.c_double
FLOAT = ctypes.c_float
BOOLEAN = BYTE
BOOL = ctypes.c_long
class VARIANT_BOOL(ctypes._SimpleCData):
_type_ = "v"
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.value)
ULONG = ctypes.c_ulong
LONG = ctypes.c_long
USHORT = ctypes.c_ushort
SHORT = ctypes.c_short
# in the windows header files, these are structures.
_LARGE_INTEGER = LARGE_INTEGER = ctypes.c_longlong
_ULARGE_INTEGER = ULARGE_INTEGER = ctypes.c_ulonglong
LPCOLESTR = LPOLESTR = OLESTR = ctypes.c_wchar_p
LPCWSTR = LPWSTR = ctypes.c_wchar_p
LPCSTR = LPSTR = ctypes.c_char_p
LPCVOID = LPVOID = ctypes.c_void_p
# WPARAM is defined as UINT_PTR (unsigned type)
# LPARAM is defined as LONG_PTR (signed type)
if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
WPARAM = ctypes.c_ulong
LPARAM = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
WPARAM = ctypes.c_ulonglong
LPARAM = ctypes.c_longlong
ATOM = WORD
LANGID = WORD
COLORREF = DWORD
LGRPID = DWORD
LCTYPE = DWORD
LCID = DWORD
################################################################
# HANDLE types
HANDLE = ctypes.c_void_p # in the header files: void *
HACCEL = HANDLE
HBITMAP = HANDLE
HBRUSH = HANDLE
HCOLORSPACE = HANDLE
HDC = HANDLE
HDESK = HANDLE
HDWP = HANDLE
HENHMETAFILE = HANDLE
HFONT = HANDLE
HGDIOBJ = HANDLE
HGLOBAL = HANDLE
HHOOK = HANDLE
HICON = HANDLE
HINSTANCE = HANDLE
HKEY = HANDLE
HKL = HANDLE
HLOCAL = HANDLE
HMENU = HANDLE
HMETAFILE = HANDLE
HMODULE = HANDLE
HMONITOR = HANDLE
HPALETTE = HANDLE
HPEN = HANDLE
HRGN = HANDLE
HRSRC = HANDLE
HSTR = HANDLE
HTASK = HANDLE
HWINSTA = HANDLE
HWND = HANDLE
SC_HANDLE = HANDLE
SERVICE_STATUS_HANDLE = HANDLE
################################################################
# Some important structure definitions
class RECT(ctypes.Structure):
_fields_ = [("left", LONG),
("top", LONG),
("right", LONG),
("bottom", LONG)]
tagRECT = _RECTL = RECTL = RECT
class _SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', SHORT),
('Top', SHORT),
('Right', SHORT),
('Bottom', SHORT)]
SMALL_RECT = _SMALL_RECT
class _COORD(ctypes.Structure):
_fields_ = [('X', SHORT),
('Y', SHORT)]
class POINT(ctypes.Structure):
_fields_ = [("x", LONG),
("y", LONG)]
tagPOINT = _POINTL = POINTL = POINT
class SIZE(ctypes.Structure):
_fields_ = [("cx", LONG),
("cy", LONG)]
tagSIZE = SIZEL = SIZE
def RGB(red, green, blue):
return red + (green << 8) + (blue << 16)
class FILETIME(ctypes.Structure):
_fields_ = [("dwLowDateTime", DWORD),
("dwHighDateTime", DWORD)]
_FILETIME = FILETIME
class MSG(ctypes.Structure):
_fields_ = [("hWnd", HWND),
("message", UINT),
("wParam", WPARAM),
("lParam", LPARAM),
("time", DWORD),
("pt", POINT)]
tagMSG = MSG
MAX_PATH = 260
class WIN32_FIND_DATAA(ctypes.Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", CHAR * MAX_PATH),
("cAlternateFileName", CHAR * 14)]
class WIN32_FIND_DATAW(ctypes.Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", WCHAR * MAX_PATH),
("cAlternateFileName", WCHAR * 14)]
################################################################
# Pointer types
LPBOOL = PBOOL = ctypes.POINTER(BOOL)
PBOOLEAN = ctypes.POINTER(BOOLEAN)
LPBYTE = PBYTE = ctypes.POINTER(BYTE)
PCHAR = ctypes.POINTER(CHAR)
LPCOLORREF = ctypes.POINTER(COLORREF)
LPDWORD = PDWORD = ctypes.POINTER(DWORD)
LPFILETIME = PFILETIME = ctypes.POINTER(FILETIME)
PFLOAT = ctypes.POINTER(FLOAT)
LPHANDLE = PHANDLE = ctypes.POINTER(HANDLE)
PHKEY = ctypes.POINTER(HKEY)
LPHKL = ctypes.POINTER(HKL)
LPINT = PINT = ctypes.POINTER(INT)
PLARGE_INTEGER = ctypes.POINTER(LARGE_INTEGER)
PLCID = ctypes.POINTER(LCID)
LPLONG = PLONG = ctypes.POINTER(LONG)
LPMSG = PMSG = ctypes.POINTER(MSG)
LPPOINT = PPOINT = ctypes.POINTER(POINT)
PPOINTL = ctypes.POINTER(POINTL)
LPRECT = PRECT = ctypes.POINTER(RECT)
LPRECTL = PRECTL = ctypes.POINTER(RECTL)
LPSC_HANDLE = ctypes.POINTER(SC_HANDLE)
PSHORT = ctypes.POINTER(SHORT)
LPSIZE = PSIZE = ctypes.POINTER(SIZE)
LPSIZEL = PSIZEL = ctypes.POINTER(SIZEL)
PSMALL_RECT = ctypes.POINTER(SMALL_RECT)
LPUINT = PUINT = ctypes.POINTER(UINT)
PULARGE_INTEGER = ctypes.POINTER(ULARGE_INTEGER)
PULONG = ctypes.POINTER(ULONG)
PUSHORT = ctypes.POINTER(USHORT)
PWCHAR = ctypes.POINTER(WCHAR)
LPWIN32_FIND_DATAA = PWIN32_FIND_DATAA = ctypes.POINTER(WIN32_FIND_DATAA)
LPWIN32_FIND_DATAW = PWIN32_FIND_DATAW = ctypes.POINTER(WIN32_FIND_DATAW)
LPWORD = PWORD = ctypes.POINTER(WORD)
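# --- Illustrative sketch (not part of the original module) ---
# A small Windows-only example of these types in use; GetCursorPos is a real
# user32 function, but the snippet is illustrative and only runs where
# ctypes.windll exists.
#
#     if hasattr(ctypes, 'windll'):
#         pt = POINT()
#         ctypes.windll.user32.GetCursorPos(ctypes.byref(pt))
#         # pt.x and pt.y now hold the cursor position as LONGs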
|
lgpl-3.0
| 5,092,844,102,163,481,000 | -3,602,177,987,643,780,600 | 26.861386 | 73 | 0.614606 | false |
lokeshh/lokeshh-stem
|
test/unit/response/authchallenge.py
|
9
|
2148
|
"""
Unit tests for the stem.response.authchallenge.AuthChallengeResponse class.
"""
import unittest
import stem.response
import stem.response.authchallenge
import stem.socket
from test import mocking
VALID_RESPONSE = '250 AUTHCHALLENGE \
SERVERHASH=B16F72DACD4B5ED1531F3FCC04B593D46A1E30267E636EA7C7F8DD7A2B7BAA05 \
SERVERNONCE=653574272ABBB49395BD1060D642D653CFB7A2FCE6A4955BCFED819703A9998C'
VALID_HASH = b'\xb1or\xda\xcdK^\xd1S\x1f?\xcc\x04\xb5\x93\xd4j\x1e0&~cn\xa7\xc7\xf8\xddz+{\xaa\x05'
VALID_NONCE = b"e5t'*\xbb\xb4\x93\x95\xbd\x10`\xd6B\xd6S\xcf\xb7\xa2\xfc\xe6\xa4\x95[\xcf\xed\x81\x97\x03\xa9\x99\x8c"
INVALID_RESPONSE = '250 AUTHCHALLENGE \
SERVERHASH=FOOBARB16F72DACD4B5ED1531F3FCC04B593D46A1E30267E636EA7C7F8DD7A2B7BAA05 \
SERVERNONCE=FOOBAR653574272ABBB49395BD1060D642D653CFB7A2FCE6A4955BCFED819703A9998C'
class TestAuthChallengeResponse(unittest.TestCase):
def test_valid_response(self):
"""
Parses valid AUTHCHALLENGE responses.
"""
control_message = mocking.get_message(VALID_RESPONSE)
stem.response.convert('AUTHCHALLENGE', control_message)
# now this should be a AuthChallengeResponse (ControlMessage subclass)
self.assertTrue(isinstance(control_message, stem.response.ControlMessage))
self.assertTrue(isinstance(control_message, stem.response.authchallenge.AuthChallengeResponse))
self.assertEqual(VALID_HASH, control_message.server_hash)
self.assertEqual(VALID_NONCE, control_message.server_nonce)
def test_invalid_responses(self):
"""
Tries to parse various malformed responses and checks it they raise
appropriate exceptions.
"""
auth_challenge_comp = VALID_RESPONSE.split()
for index in range(1, len(auth_challenge_comp)):
# Attempts to parse a message without this item. The first item is
# skipped because, without the 250 code, the message won't be
# constructed.
remaining_comp = auth_challenge_comp[:index] + auth_challenge_comp[index + 1:]
control_message = mocking.get_message(' '.join(remaining_comp))
self.assertRaises(stem.ProtocolError, stem.response.convert, 'AUTHCHALLENGE', control_message)
|
lgpl-3.0
| 3,883,768,488,924,272,000 | 7,117,659,655,463,967,000 | 38.054545 | 118 | 0.772812 | false |
wenyu1001/scrapy
|
tests/test_downloadermiddleware_decompression.py
|
133
|
1851
|
from unittest import TestCase, main
from scrapy.http import Response, XmlResponse
from scrapy.downloadermiddlewares.decompression import DecompressionMiddleware
from scrapy.spiders import Spider
from tests import get_testdata
from scrapy.utils.test import assert_samelines
def _test_data(formats):
uncompressed_body = get_testdata('compressed', 'feed-sample1.xml')
test_responses = {}
for format in formats:
body = get_testdata('compressed', 'feed-sample1.' + format)
test_responses[format] = Response('http://foo.com/bar', body=body)
return uncompressed_body, test_responses
class DecompressionMiddlewareTest(TestCase):
test_formats = ['tar', 'xml.bz2', 'xml.gz', 'zip']
uncompressed_body, test_responses = _test_data(test_formats)
def setUp(self):
self.mw = DecompressionMiddleware()
self.spider = Spider('foo')
def test_known_compression_formats(self):
for fmt in self.test_formats:
rsp = self.test_responses[fmt]
new = self.mw.process_response(None, rsp, self.spider)
assert isinstance(new, XmlResponse), \
'Failed %s, response type %s' % (fmt, type(new).__name__)
assert_samelines(self, new.body, self.uncompressed_body, fmt)
def test_plain_response(self):
rsp = Response(url='http://test.com', body=self.uncompressed_body)
new = self.mw.process_response(None, rsp, self.spider)
assert new is rsp
assert_samelines(self, new.body, rsp.body)
def test_empty_response(self):
rsp = Response(url='http://test.com', body=b'')
new = self.mw.process_response(None, rsp, self.spider)
assert new is rsp
assert not rsp.body
assert not new.body
def tearDown(self):
del self.mw
if __name__ == '__main__':
main()
|
bsd-3-clause
| -949,783,831,092,888,000 | 4,951,109,287,860,853,000 | 33.924528 | 78 | 0.654781 | false |
embeddedarm/android_external_chromium_org
|
tools/telemetry/telemetry/page/page_filter_unittest.py
|
25
|
4195
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.page import page as page_module
from telemetry.page import page_filter as page_filter_module
from telemetry.page import page_set
class MockUrlFilterOptions(object):
def __init__(self, page_filter, page_filter_exclude):
self.page_filter = page_filter
self.page_filter_exclude = page_filter_exclude
self.page_label_filter = None
self.page_label_filter_exclude = None
class MockLabelFilterOptions(object):
def __init__(self, page_label_filter, page_label_filter_exclude):
self.page_filter = None
self.page_filter_exclude = None
self.page_label_filter = page_label_filter
self.page_label_filter_exclude = page_label_filter_exclude
class PageFilterTest(unittest.TestCase):
def setUp(self):
ps = page_set.PageSet()
self.p1 = page_module.Page(
'file://conformance/textures/tex-sub-image-2d.html',
ps,
{ 'name': 'WebglConformance.conformance_textures_tex_sub_image_2d' })
self.p2 = page_module.Page(
'file://othersuite/textures/tex-sub-image-3d.html',
ps,
{ 'name': 'OtherSuite.textures_tex_sub_image_3d' })
self.p3 = page_module.Page(
'file://othersuite/textures/tex-sub-image-3d.html',
ps,
{ 'name': None })
def testURLPattern(self):
options = MockUrlFilterOptions('conformance/textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('somethingelse', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
def testName(self):
options = MockUrlFilterOptions('somethingelse', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('textures_tex_sub_image', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('WebglConformance', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('OtherSuite', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
def testNameNone(self):
options = MockUrlFilterOptions('othersuite/textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p3))
options = MockUrlFilterOptions('conformance/textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p3))
def testLabelFilters(self):
self.p1.label1 = True
self.p2.label1 = True
self.p3.label1 = False
self.p1.label2 = True
self.p2.label2 = False
self.p3.label2 = True
# Include both labels
options = MockLabelFilterOptions('label1,label2', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
self.assertTrue(page_filter.IsSelected(self.p3))
# Exclude takes priority
options = MockLabelFilterOptions('label1', 'label2')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
self.assertFalse(page_filter.IsSelected(self.p3))
|
bsd-3-clause
| 3,766,626,798,513,230,000 | 3,036,538,205,548,505,000 | 41.373737 | 77 | 0.721573 | false |
bartekjagiello/inteygrate_flaskapp
|
yowsup/layers/protocol_presence/protocolentities/iq_lastseen_result.py
|
61
|
1028
|
from yowsup.layers.protocol_iq.protocolentities.iq_result import ResultIqProtocolEntity
from yowsup.structs.protocoltreenode import ProtocolTreeNode
class ResultLastseenIqProtocolEntity(ResultIqProtocolEntity):
def __init__(self, jid, seconds, _id = None):
super(ResultLastseenIqProtocolEntity, self).__init__(_from=jid, _id=_id)
self.setSeconds(seconds)
def setSeconds(self, seconds):
self.seconds = int(seconds)
def getSeconds(self):
return self.seconds
def __str__(self):
out = super(ResultIqProtocolEntity, self).__str__()
out += "Seconds: %s\n" % self.seconds
return out
def toProtocolTreeNode(self):
node = super(ResultLastseenIqProtocolEntity, self).toProtocolTreeNode()
node.addChild(ProtocolTreeNode("query", {"seconds": str(self.seconds)}))
return node
@staticmethod
def fromProtocolTreeNode(node):
return ResultLastseenIqProtocolEntity(node["from"], node.getChild("query")["seconds"], node["id"])
|
gpl-3.0
| -7,934,080,718,167,162,000 | 4,536,939,144,542,097,400 | 38.576923 | 106 | 0.69358 | false |
mcus/SickRage
|
lib/sqlalchemy/ext/mutable.py
|
76
|
22912
|
# ext/mutable.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provide support for tracking of in-place changes to scalar values,
which are propagated into ORM change events on owning parent objects.
.. versionadded:: 0.7 :mod:`sqlalchemy.ext.mutable` replaces SQLAlchemy's
legacy approach to in-place mutations of scalar values; see
:ref:`07_migration_mutation_extension`.
.. _mutable_scalars:
Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
class JSONEncodedDict(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The
:mod:`sqlalchemy.ext.mutable` extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it. Below, we illustrate a simple
version of the :class:`.MutableDict` dictionary object, which applies
the :class:`.Mutable` mixin to a plain Python dictionary::
from sqlalchemy.ext.mutable import Mutable
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
variants on this approach, such as subclassing ``UserDict.UserDict`` or
``collections.MutableMapping``; the part that's important to this example is
that the :meth:`.Mutable.changed` method is called whenever an in-place
change to the datastructure takes place.
We also redefine the :meth:`.Mutable.coerce` method which will be used to
convert any values that are not instances of ``MutableDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well
have created our ``JSONEncodedDict`` such that it always returns an instance
of ``MutableDict``, and additionally ensured that all calling code
uses ``MutableDict`` explicitly. When :meth:`.Mutable.coerce` is not
overridden, any values applied to a parent object which are not instances
of the mutable type will raise a ``ValueError``.
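For instance (an illustrative sketch, assuming a mapped object such as the
``m1`` instance used in the examples further below)::
    m1.data = {'value1': 'foo'}    # plain dict is coerced to MutableDict
    m1.data = 'not a dictionary'   # raises ValueError via Mutable.coerce()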
Our new ``MutableDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types. This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute. Such as, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutableDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
from sqlalchemy import mapper
class MyDataClass(object):
pass
# associates mutation listeners with MyDataClass.data
mapper(MyDataClass, my_data)
The ``MyDataClass.data`` member will now be notified of in place changes
to its value.
There's no difference in usage when using declarative::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSONEncodedDict))
Any in-place changes to the ``MyDataClass.data`` member
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> m1 = MyDataClass(data={'value1':'foo'})
>>> sess.add(m1)
>>> sess.commit()
>>> m1.data['value1'] = 'bar'
>>> assert m1 in sess.dirty
True
The ``MutableDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using
:meth:`~.Mutable.associate_with`. This is similar to
:meth:`~.Mutable.as_mutable` except it will intercept all occurrences
of ``MutableDict`` in all mappings unconditionally, without
the need to declare it individually::
MutableDict.associate_with(JSONEncodedDict)
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the
placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
stores a mapping of parent mapped objects keyed to the attribute name under
which they are associated with this value. ``WeakKeyDictionary`` objects are
not picklable, due to the fact that they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~MutableBase._parents` collection from the pickle
stream::
class MyMutableType(Mutable):
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_parents', None)
return d
With our dictionary example, we need to return the contents of the dict itself
(and also restore them on __setstate__)::
class MutableDict(Mutable, dict):
# ....
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
In the case that our mutable value object is pickled as it is attached to one
or more parent objects that are also part of the pickle, the :class:`.Mutable`
mixin will re-establish the :attr:`.Mutable._parents` collection on each value
object as the owning parents themselves are unpickled.
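A minimal round trip, assuming the ``MutableDict`` class above together with
the ``__getstate__``/``__setstate__`` pair just shown (purely illustrative)::
    import pickle
    value = MutableDict({'value1': 'foo'})
    restored = pickle.loads(pickle.dumps(value))
    assert restored == {'value1': 'foo'}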
.. _mutable_composites:
Establishing Mutability on Composites
=====================================
Composites are a special ORM feature which allow a single scalar attribute to
be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
.. versionchanged:: 0.7
The internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
change events to its parents via the :meth:`.MutableComposite.changed` method.
In the case of a composite class, the detection is usually via the usage of
Python descriptors (i.e. ``@property``), or alternatively via the special
Python method ``__setattr__()``. Below we expand upon the ``Point`` class
introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
and to also route attribute set events via ``__setattr__`` to the
:meth:`.MutableComposite.changed` method::
from sqlalchemy.ext.mutable import MutableComposite
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
"Intercept set events"
# set the attribute
object.__setattr__(self, key, value)
# alert all parents to the change
self.changed()
def __composite_values__(self):
return self.x, self.y
def __eq__(self, other):
return isinstance(other, Point) and \\
other.x == self.x and \\
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
The :class:`.MutableComposite` class uses a Python metaclass to automatically
establish listeners for any usage of :func:`.orm.composite` that specifies our
``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
listeners are established which will route change events from ``Point``
objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
from sqlalchemy.orm import composite, mapper
from sqlalchemy import Table, Column
vertices = Table('vertices', metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Vertex(object):
pass
mapper(Vertex, vertices, properties={
'start': composite(Point, vertices.c.x1, vertices.c.y1),
'end': composite(Point, vertices.c.x2, vertices.c.y2)
})
Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
>>> sess.add(v1)
>>> sess.commit()
>>> v1.end.x = 8
>>> assert v1 in sess.dirty
True
Coercing Mutable Composites
---------------------------
The :meth:`.MutableBase.coerce` method is also supported on composite types.
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
method is only called for attribute set operations, not load operations.
Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
to using a :func:`.validates` validation routine for all attributes which
make use of the custom composite type::
class Point(MutableComposite):
# other Point methods
# ...
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
elif not isinstance(value, Point):
raise ValueError("tuple or Point expected")
return value
.. versionadded:: 0.7.10,0.8.0b2
Support for the :meth:`.MutableBase.coerce` method in conjunction with
objects of type :class:`.MutableComposite`.
Supporting Pickling
--------------------
As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
class uses a ``weakref.WeakKeyDictionary`` available via the
:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
"""
from ..orm.attributes import flag_modified
from .. import event, types
from ..orm import mapper, object_mapper, Mapper
from ..util import memoized_property
import weakref
class MutableBase(object):
"""Common base class to :class:`.Mutable`
and :class:`.MutableComposite`.
"""
@memoized_property
def _parents(self):
"""Dictionary of parent object->attribute name on the parent.
This attribute is a so-called "memoized" property. It initializes
itself with a new ``weakref.WeakKeyDictionary`` the first time
it is accessed, returning the same object upon subsequent access.
"""
return weakref.WeakKeyDictionary()
@classmethod
def coerce(cls, key, value):
"""Given a value, coerce it into the target type.
Can be overridden by custom subclasses to coerce incoming
data into a particular type.
By default, raises ``ValueError``.
This method is called in different scenarios depending on if
the parent class is of type :class:`.Mutable` or of type
:class:`.MutableComposite`. In the case of the former, it is called
for both attribute-set operations as well as during ORM loading
operations. For the latter, it is only called during attribute-set
operations; the mechanics of the :func:`.composite` construct
handle coercion during load operations.
:param key: string name of the ORM-mapped attribute being set.
:param value: the incoming value.
:return: the method should return the coerced value, or raise
``ValueError`` if the coercion cannot be completed.
"""
if value is None:
return None
msg = "Attribute '%s' does not accept objects of type %s"
raise ValueError(msg % (key, type(value)))
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
key = attribute.key
if parent_cls is not attribute.class_:
return
# rely on "propagate" here
parent_cls = attribute.class_
def load(state, *args):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
``Mutable``.
"""
val = state.dict.get(key, None)
if val is not None:
if coerce:
val = cls.coerce(key, val)
state.dict[key] = val
val._parents[state.obj()] = key
def set(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
Establish a weak reference to the parent object
on the incoming value, remove it for the one
outgoing.
"""
if value is oldvalue:
return value
if not isinstance(value, cls):
value = cls.coerce(key, value)
if value is not None:
value._parents[target.obj()] = key
if isinstance(oldvalue, cls):
oldvalue._parents.pop(target.obj(), None)
return value
def pickle(state, state_dict):
val = state.dict.get(key, None)
if val is not None:
if 'ext.mutable.values' not in state_dict:
state_dict['ext.mutable.values'] = []
state_dict['ext.mutable.values'].append(val)
def unpickle(state, state_dict):
if 'ext.mutable.values' in state_dict:
for val in state_dict['ext.mutable.values']:
val._parents[state.obj()] = key
event.listen(parent_cls, 'load', load,
raw=True, propagate=True)
event.listen(parent_cls, 'refresh', load,
raw=True, propagate=True)
event.listen(attribute, 'set', set,
raw=True, retval=True, propagate=True)
event.listen(parent_cls, 'pickle', pickle,
raw=True, propagate=True)
event.listen(parent_cls, 'unpickle', unpickle,
raw=True, propagate=True)
class Mutable(MutableBase):
"""Mixin that defines transparent propagation of change
events to a parent object.
See the example in :ref:`mutable_scalars` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
flag_modified(parent, key)
@classmethod
def associate_with_attribute(cls, attribute):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
cls._listen_on_attribute(attribute, True, attribute.class_)
@classmethod
def associate_with(cls, sqltype):
"""Associate this wrapper with all future mapped columns
of the given type.
This is a convenience method that calls
``associate_with_attribute`` automatically.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.associate_with` for types that are permanent to an
application, not with ad-hoc types else this will cause unbounded
growth in memory usage.
"""
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if isinstance(prop.columns[0].type, sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
@classmethod
def as_mutable(cls, sqltype):
"""Associate a SQL type with this mutable Python type.
This establishes listeners that will detect ORM mappings against
the given type, adding mutation event trackers to those mappings.
The type is returned, unconditionally as an instance, so that
:meth:`.as_mutable` can be used inline::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('data', MyMutableType.as_mutable(PickleType))
)
Note that the returned type is always an instance, even if a class
is given, and that only columns which are declared specifically with
that type instance receive additional instrumentation.
To associate a particular mutable type with all occurrences of a
particular type, use the :meth:`.Mutable.associate_with` classmethod
of the particular :class:`.Mutable` subclass to establish a global
association.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.as_mutable` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
"""
sqltype = types.to_instance(sqltype)
def listen_for_type(mapper, class_):
for prop in mapper.column_attrs:
if prop.columns[0].type is sqltype:
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
return sqltype
class MutableComposite(MutableBase):
"""Mixin that defines transparent propagation of change
events on a SQLAlchemy "composite" object to its
owning parent or parents.
See the example in :ref:`mutable_composites` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
prop = object_mapper(parent).get_property(key)
for value, attr_name in zip(
self.__composite_values__(),
prop._attribute_keys):
setattr(parent, attr_name, value)
def _setup_composite_listener():
def _listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if (hasattr(prop, 'composite_class') and
isinstance(prop.composite_class, type) and
issubclass(prop.composite_class, MutableComposite)):
prop.composite_class._listen_on_attribute(
getattr(class_, prop.key), False, class_)
if not event.contains(Mapper, "mapper_configured", _listen_for_type):
event.listen(Mapper, 'mapper_configured', _listen_for_type)
_setup_composite_listener()
class MutableDict(Mutable, dict):
"""A dictionary type that implements :class:`.Mutable`.
.. versionadded:: 0.8
"""
def __setitem__(self, key, value):
"""Detect dictionary set events and emit change events."""
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"""Detect dictionary del events and emit change events."""
dict.__delitem__(self, key)
self.changed()
def clear(self):
dict.clear(self)
self.changed()
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionary to MutableDict."""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
return Mutable.coerce(key, value)
else:
return value
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
|
gpl-3.0
| -8,090,697,009,239,155,000 | 4,217,810,180,143,473,700 | 35.025157 | 84 | 0.650794 | false |
FenceAtMHacks/flaskbackend
|
fence-api/flask/lib/python2.7/site-packages/pip/_vendor/cachecontrol/adapter.py
|
87
|
3967
|
import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
invalidating_methods = set(['PUT', 'DELETE'])
def __init__(self, cache=None,
cache_etags=True,
controller_class=None,
serializer=None,
heuristic=None,
*args, **kw):
super(CacheControlAdapter, self).__init__(*args, **kw)
self.cache = cache or DictCache()
self.heuristic = heuristic
controller_factory = controller_class or CacheController
self.controller = controller_factory(
self.cache,
cache_etags=cache_etags,
serializer=serializer,
)
def send(self, request, **kw):
"""
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
"""
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response, from_cache=True)
# check for etags and add headers if appropriate
request.headers.update(self.controller.conditional_headers(request))
resp = super(CacheControlAdapter, self).send(request, **kw)
return resp
def build_response(self, request, response, from_cache=False):
"""
Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response
"""
if not from_cache and request.method == 'GET':
# apply any expiration heuristics
if response.status == 304:
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an etag. In either case, we want to try and
# update the cache if that is the case.
cached_response = self.controller.update_cached_response(
request, response
)
if cached_response is not response:
from_cache = True
# We are done with the server response, read a
# possible response body (compliant servers will
# not return one, but we cannot be 100% sure) and
# release the connection back to the pool.
response.read(decode_content=False)
response.release_conn()
response = cached_response
else:
# Check for any heuristics that might update headers
# before trying to cache.
if self.heuristic:
response = self.heuristic.apply(response)
# Wrap the response file with a wrapper that will cache the
# response when the stream has been consumed.
response._fp = CallbackFileWrapper(
response._fp,
functools.partial(
self.controller.cache_response,
request,
response,
)
)
resp = super(CacheControlAdapter, self).build_response(
request, response
)
# See if we should invalidate the cache.
if request.method in self.invalidating_methods and resp.ok:
cache_url = self.controller.cache_url(request.url)
self.cache.delete(cache_url)
# Give the request a from_cache attr to let people use it
resp.from_cache = from_cache
return resp
def close(self):
self.cache.close()
super(CacheControlAdapter, self).close()
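# Illustrative usage sketch (an assumption, not part of the vendored module):
# the adapter is typically mounted on a requests Session so that GET responses
# are cached and revalidated where possible.
if __name__ == '__main__':
    from pip._vendor import requests
    sess = requests.Session()
    adapter = CacheControlAdapter(cache_etags=True)
    sess.mount('http://', adapter)
    sess.mount('https://', adapter)
    first = sess.get('http://www.example.com/')
    second = sess.get('http://www.example.com/')
    # build_response() sets `from_cache`; whether it is True depends on the
    # server's caching headers.
    print(second.from_cache)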
|
mit
| -2,948,321,094,085,656,000 | 237,506,637,232,813,500 | 35.063636 | 85 | 0.571465 | false |
puzan/ansible
|
test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py
|
41
|
17149
|
import pytest
import unittest
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
import ansible.modules.cloud.amazon.ec2_vpc_nat_gateway as ng
Options = (
namedtuple(
'Options', [
'connection', 'module_path', 'forks', 'become', 'become_method',
'become_user', 'remote_user', 'private_key_file', 'ssh_common_args',
'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args', 'verbosity',
'check'
]
)
)
# initialize needed objects
variable_manager = VariableManager()
loader = DataLoader()
options = (
Options(
connection='local',
module_path='cloud/amazon',
forks=1, become=None, become_method=None, become_user=None, check=True,
remote_user=None, private_key_file=None, ssh_common_args=None,
sftp_extra_args=None, scp_extra_args=None, ssh_extra_args=None,
verbosity=3
)
)
passwords = dict(vault_pass='')
aws_region = 'us-west-2'
# create inventory and pass to var manager
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost')
variable_manager.set_inventory(inventory)
def run(play):
tqm = None
results = None
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
stdout_callback='default',
)
results = tqm.run(play)
finally:
if tqm is not None:
tqm.cleanup()
return tqm, results
class AnsibleVpcNatGatewayTasks(unittest.TestCase):
def test_create_gateway_using_allocation_id(self):
play_source = dict(
name = "Create new nat gateway with eip allocation-id",
hosts = 'localhost',
gather_facts = 'no',
tasks = [
dict(
action=dict(
module='ec2_vpc_nat_gateway',
args=dict(
subnet_id='subnet-12345678',
allocation_id='eipalloc-12345678',
wait='yes',
region=aws_region,
)
),
register='nat_gateway',
),
dict(
action=dict(
module='debug',
args=dict(
msg='{{nat_gateway}}'
)
)
)
]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
tqm, results = run(play)
self.failUnless(tqm._stats.ok['localhost'] == 2)
self.failUnless(tqm._stats.changed['localhost'] == 1)
def test_create_gateway_using_allocation_id_idempotent(self):
play_source = dict(
name = "Create new nat gateway with eip allocation-id",
hosts = 'localhost',
gather_facts = 'no',
tasks = [
dict(
action=dict(
module='ec2_vpc_nat_gateway',
args=dict(
subnet_id='subnet-123456789',
allocation_id='eipalloc-1234567',
wait='yes',
region=aws_region,
)
),
register='nat_gateway',
),
dict(
action=dict(
module='debug',
args=dict(
msg='{{nat_gateway}}'
)
)
)
]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
tqm, results = run(play)
self.failUnless(tqm._stats.ok['localhost'] == 2)
self.assertFalse('localhost' in tqm._stats.changed)
def test_create_gateway_using_eip_address(self):
play_source = dict(
name = "Create new nat gateway with eip address",
hosts = 'localhost',
gather_facts = 'no',
tasks = [
dict(
action=dict(
module='ec2_vpc_nat_gateway',
args=dict(
subnet_id='subnet-12345678',
eip_address='55.55.55.55',
wait='yes',
region=aws_region,
)
),
register='nat_gateway',
),
dict(
action=dict(
module='debug',
args=dict(
msg='{{nat_gateway}}'
)
)
)
]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
tqm, results = run(play)
self.failUnless(tqm._stats.ok['localhost'] == 2)
self.failUnless(tqm._stats.changed['localhost'] == 1)
def test_create_gateway_using_eip_address_idempotent(self):
play_source = dict(
name = "Create new nat gateway with eip address",
hosts = 'localhost',
gather_facts = 'no',
tasks = [
dict(
action=dict(
module='ec2_vpc_nat_gateway',
args=dict(
subnet_id='subnet-123456789',
eip_address='55.55.55.55',
wait='yes',
region=aws_region,
)
),
register='nat_gateway',
),
dict(
action=dict(
module='debug',
args=dict(
msg='{{nat_gateway}}'
)
)
)
]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
tqm, results = run(play)
self.failUnless(tqm._stats.ok['localhost'] == 2)
self.assertFalse('localhost' in tqm._stats.changed)
def test_create_gateway_in_subnet_only_if_one_does_not_exist_already(self):
play_source = dict(
name = "Create new nat gateway only if one does not exist already",
hosts = 'localhost',
gather_facts = 'no',
tasks = [
dict(
action=dict(
module='ec2_vpc_nat_gateway',
args=dict(
if_exist_do_not_create='yes',
subnet_id='subnet-123456789',
wait='yes',
region=aws_region,
)
),
register='nat_gateway',
),
dict(
action=dict(
module='debug',
args=dict(
msg='{{nat_gateway}}'
)
)
)
]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
tqm, results = run(play)
self.failUnless(tqm._stats.ok['localhost'] == 2)
self.assertFalse('localhost' in tqm._stats.changed)
def test_delete_gateway(self):
play_source = dict(
name = "Delete Nat Gateway",
hosts = 'localhost',
gather_facts = 'no',
tasks = [
dict(
action=dict(
module='ec2_vpc_nat_gateway',
args=dict(
nat_gateway_id='nat-123456789',
state='absent',
wait='yes',
region=aws_region,
)
),
register='nat_gateway',
),
dict(
action=dict(
module='debug',
args=dict(
msg='{{nat_gateway}}'
)
)
)
]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
tqm, results = run(play)
self.failUnless(tqm._stats.ok['localhost'] == 2)
self.assertTrue('localhost' in tqm._stats.changed)
class AnsibleEc2VpcNatGatewayFunctions(unittest.TestCase):
def test_convert_to_lower(self):
example = ng.DRY_RUN_GATEWAY_UNCONVERTED
converted_example = ng.convert_to_lower(example[0])
keys = list(converted_example.keys())
keys.sort()
for i in range(len(keys)):
if i == 0:
self.assertEqual(keys[i], 'create_time')
if i == 1:
self.assertEqual(keys[i], 'nat_gateway_addresses')
gw_addresses_keys = list(converted_example[keys[i]][0].keys())
gw_addresses_keys.sort()
for j in range(len(gw_addresses_keys)):
if j == 0:
self.assertEqual(gw_addresses_keys[j], 'allocation_id')
if j == 1:
self.assertEqual(gw_addresses_keys[j], 'network_interface_id')
if j == 2:
self.assertEqual(gw_addresses_keys[j], 'private_ip')
if j == 3:
self.assertEqual(gw_addresses_keys[j], 'public_ip')
if i == 2:
self.assertEqual(keys[i], 'nat_gateway_id')
if i == 3:
self.assertEqual(keys[i], 'state')
if i == 4:
self.assertEqual(keys[i], 'subnet_id')
if i == 5:
self.assertEqual(keys[i], 'vpc_id')
def test_get_nat_gateways(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, stream = (
ng.get_nat_gateways(client, 'subnet-123456789', check_mode=True)
)
should_return = ng.DRY_RUN_GATEWAYS
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_get_nat_gateways_no_gateways_found(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, stream = (
ng.get_nat_gateways(client, 'subnet-1234567', check_mode=True)
)
self.assertTrue(success)
self.assertEqual(stream, [])
def test_wait_for_status(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, gws = (
ng.wait_for_status(
client, 5, 'nat-123456789', 'available', check_mode=True
)
)
should_return = ng.DRY_RUN_GATEWAYS[0]
self.assertTrue(success)
self.assertEqual(gws, should_return)
def test_wait_for_status_to_timeout(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, gws = (
ng.wait_for_status(
client, 2, 'nat-12345678', 'available', check_mode=True
)
)
self.assertFalse(success)
self.assertEqual(gws, {})
def test_gateway_in_subnet_exists_with_allocation_id(self):
client = boto3.client('ec2', region_name=aws_region)
gws, err_msg = (
ng.gateway_in_subnet_exists(
client, 'subnet-123456789', 'eipalloc-1234567', check_mode=True
)
)
should_return = ng.DRY_RUN_GATEWAYS
self.assertEqual(gws, should_return)
def test_gateway_in_subnet_exists_with_allocation_id_does_not_exist(self):
client = boto3.client('ec2', region_name=aws_region)
gws, err_msg = (
ng.gateway_in_subnet_exists(
client, 'subnet-123456789', 'eipalloc-123', check_mode=True
)
)
should_return = list()
self.assertEqual(gws, should_return)
def test_gateway_in_subnet_exists_without_allocation_id(self):
client = boto3.client('ec2', region_name=aws_region)
gws, err_msg = (
ng.gateway_in_subnet_exists(
client, 'subnet-123456789', check_mode=True
)
)
should_return = ng.DRY_RUN_GATEWAYS
self.assertEqual(gws, should_return)
def test_get_eip_allocation_id_by_address(self):
client = boto3.client('ec2', region_name=aws_region)
allocation_id, _ = (
ng.get_eip_allocation_id_by_address(
client, '55.55.55.55', check_mode=True
)
)
should_return = 'eipalloc-1234567'
self.assertEqual(allocation_id, should_return)
def test_get_eip_allocation_id_by_address_does_not_exist(self):
client = boto3.client('ec2', region_name=aws_region)
allocation_id, err_msg = (
ng.get_eip_allocation_id_by_address(
client, '52.52.52.52', check_mode=True
)
)
self.assertEqual(err_msg, 'EIP 52.52.52.52 does not exist')
self.assertTrue(allocation_id is None)
def test_allocate_eip_address(self):
client = boto3.client('ec2', region_name=aws_region)
success, err_msg, eip_id = (
ng.allocate_eip_address(
client, check_mode=True
)
)
self.assertTrue(success)
def test_release_address(self):
client = boto3.client('ec2', region_name=aws_region)
success, _ = (
ng.release_address(
client, 'eipalloc-1234567', check_mode=True
)
)
self.assertTrue(success)
def test_create(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.create(
client, 'subnet-123456', 'eipalloc-1234567', check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
def test_pre_create(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456', check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
def test_pre_create_idemptotent_with_allocation_id(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456789', allocation_id='eipalloc-1234567', check_mode=True
)
)
self.assertTrue(success)
self.assertFalse(changed)
def test_pre_create_idemptotent_with_eip_address(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456789', eip_address='55.55.55.55', check_mode=True
)
)
self.assertTrue(success)
self.assertFalse(changed)
def test_pre_create_idemptotent_if_exist_do_not_create(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, results = (
ng.pre_create(
client, 'subnet-123456789', if_exist_do_not_create=True, check_mode=True
)
)
self.assertTrue(success)
self.assertFalse(changed)
def test_delete(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, _ = (
ng.remove(
client, 'nat-123456789', check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
def test_delete_and_release_ip(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, _ = (
ng.remove(
client, 'nat-123456789', release_eip=True, check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
def test_delete_if_does_not_exist(self):
client = boto3.client('ec2', region_name=aws_region)
success, changed, err_msg, _ = (
ng.remove(
client, 'nat-12345', check_mode=True
)
)
self.assertFalse(success)
self.assertFalse(changed)
|
gpl-3.0
| 8,971,442,375,591,435,000 | 6,446,847,508,703,008,000 | 34.652807 | 94 | 0.491982 | false |
adlius/osf.io
|
admin/nodes/urls.py
|
6
|
2100
|
from django.conf.urls import url
from admin.nodes import views
app_name = 'admin'
urlpatterns = [
url(r'^$', views.NodeFormView.as_view(),
name='search'),
url(r'^flagged_spam$', views.NodeFlaggedSpamList.as_view(),
name='flagged-spam'),
url(r'^known_spam$', views.NodeKnownSpamList.as_view(),
name='known-spam'),
url(r'^known_ham$', views.NodeKnownHamList.as_view(),
name='known-ham'),
url(r'^(?P<guid>[a-z0-9]+)/$', views.NodeView.as_view(),
name='node'),
url(r'^(?P<guid>[a-z0-9]+)/logs/$', views.AdminNodeLogView.as_view(),
name='node-logs'),
url(r'^registration_list/$', views.RegistrationListView.as_view(),
name='registrations'),
url(r'^stuck_registration_list/$', views.StuckRegistrationListView.as_view(),
name='stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/update_embargo/$',
views.RegistrationUpdateEmbargoView.as_view(), name='update_embargo'),
url(r'^(?P<guid>[a-z0-9]+)/remove/$', views.NodeDeleteView.as_view(),
name='remove'),
url(r'^(?P<guid>[a-z0-9]+)/restore/$', views.NodeDeleteView.as_view(),
name='restore'),
url(r'^(?P<guid>[a-z0-9]+)/confirm_spam/$', views.NodeConfirmSpamView.as_view(),
name='confirm-spam'),
url(r'^(?P<guid>[a-z0-9]+)/confirm_ham/$', views.NodeConfirmHamView.as_view(),
name='confirm-ham'),
url(r'^(?P<guid>[a-z0-9]+)/reindex_share_node/$', views.NodeReindexShare.as_view(),
name='reindex-share-node'),
url(r'^(?P<guid>[a-z0-9]+)/reindex_elastic_node/$', views.NodeReindexElastic.as_view(),
name='reindex-elastic-node'),
url(r'^(?P<guid>[a-z0-9]+)/restart_stuck_registrations/$', views.RestartStuckRegistrationsView.as_view(),
name='restart-stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/remove_stuck_registrations/$', views.RemoveStuckRegistrationsView.as_view(),
name='remove-stuck-registrations'),
url(r'^(?P<guid>[a-z0-9]+)/remove_user/(?P<user_id>[a-z0-9]+)/$',
views.NodeRemoveContributorView.as_view(), name='remove_user'),
]
|
apache-2.0
| -5,043,002,825,422,631,000 | 6,162,683,166,760,165,000 | 47.837209 | 109 | 0.621429 | false |
ttm/percolation
|
percolation/rdf/ontology.py
|
1
|
7460
|
import percolation as P
from .rdflib import NS
a=NS.rdf.type
def percolationSystem():
triples=[
(NS.per.CurrentStatus, a, NS.per.SystemStatus)
         ]
    return triples
def minimumTestOntology(context="minimum_ontology"):
triples=[
(NS.po.FacebookSnapshot,NS.rdfs.subClassOf,NS.po.Snapshot),
(NS.facebook.user,NS.rdfs.range,NS.po.Participant),
(NS.facebook.ego,NS.rdfs.domain,NS.po.FacebookSnapshot),
(NS.facebook.userID,NS.rdfs.subPropertyOf,NS.po.userID),
]
P.add(triples,context=context)
def minimumOntology(context="minimum_ontology"):
triples=rdfsTriples()
if context=="triples":
return triples
P.add(triples,context=context)
def rdfsTriples():
"""Sub Class/Property and range domain assertions"""
triples=[
(NS.po.onlineMetaXMLFile, NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.onlineMetaXMLFile, NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.FacebookSnapshot,NS.rdfs.subClassOf,NS.po.Snapshot),
(NS.po.onlineMetaXMLFile, NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.onlineMetaTTLFile, NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.MetaXMLFilename, NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.MetaTTLFilename, NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.onlineInteractionXMLFile,NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.onlineinteractionTTLFile,NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.interactionXMLFilename, NS.rdfs.subPropertyOf, NS.void.dataDump),
(NS.po.interactionTTLFilename, NS.rdfs.subPropertyOf, NS.void.dataDump),
]
return triples
def participantRDFSStructure(): # participant
triples=[
(NS.po.Participant, NS.rdfs.subClassOf, NS.foaf.Person),
(NS.gmane.Participant,NS.rdfs.subClassOf,NS.po.Participant),
(NS.facebook.Participant,NS.rdfs.subClassOf,NS.po.Participant),
(NS.tw.Participant,NS.rdfs.subClassOf,NS.po.Participant),
]
return triples
def snapshotRDFSStructure():
triples=[
(NS.po.InteractionSnapshot, NS.rdfs.subClassOf, NS.po.Snapshot), # fb, part, tw, irc, gmane, cidade
(NS.po.FriendshipSnapshot, NS.rdfs.subClassOf, NS.po.Snapshot), # fb, part
(NS.po.ReportSnapshot, NS.rdfs.subClassOf, NS.po.Snapshot), # aa
(NS.po.FacebookSnapshot, NS.rdfs.subClassOf, NS.po.Snapshot),
(NS.po.FacebookInteractionSnapshot, NS.rdfs.subClassOf, NS.po.FacebookSnapshot),
(NS.po.FacebookInteractionSnapshot, NS.rdfs.subClassOf, NS.po.InteractionSnapshot),
(NS.po.FacebookFriendshipSnapshot, NS.rdfs.subClassOf, NS.po.FacebookSnapshot),
(NS.po.FacebookFriendshipSnapshot, NS.rdfs.subClassOf, NS.po.FriendshipSnapshot),
(NS.po.TwitterSnapshot, NS.rdfs.subClassOf, NS.po.InteractionSnapshot),
(NS.po.GmaneSnapshot, NS.rdfs.subClassOf, NS.po.InteractionSnapshot),
(NS.po.IRCSnapshot, NS.rdfs.subClassOf, NS.po.InteractionSnapshot),
(NS.po.AASnapshot, NS.rdfs.subClassOf, NS.po.ReportSnapshot),
(NS.po.ParticipaSnapshot, NS.rdfs.subClassOf, NS.po.CompleteSnapshot),
(NS.po.CidadeDemocraticaSnapshot, NS.rdfs.subClassOf, NS.po.InteractionSnapshot),
]
return triples
def idRDFSStructure():
    # User ID only: in a message the ID is the URI, since messages do not differ across lists/groups
    # But IDs can exist for groups and people, since they repeat across different datasets
triples=[
(NS.gmane.gmaneID, NS.rdfs.subPropertyOf, NS.po.auxID),
(NS.facebook.groupID, NS.rdfs.subPropertyOf, NS.po.auxID),
(NS.facebook.ID, NS.rdfs.subPropertyOf,NS.po.ID),
(NS.po.numericID, NS.rdfs.subPropertyOf,NS.po.ID),
(NS.po.stringID, NS.rdfs.subPropertyOf,NS.po.ID),
(NS.po.auxID, NS.rdfs.subPropertyOf,NS.po.ID),
(NS.facebook.numericID,NS.rdfs.subPropertyOf,NS.facebook.ID),
(NS.facebook.numericID,NS.rdfs.subPropertyOf,NS.po.numericID),
(NS.facebook.stringID, NS.rdfs.subPropertyOf,NS.facebook.ID),
(NS.facebook.stringID, NS.rdfs.subPropertyOf,NS.po.stringID),
(NS.gmane.stringID,NS.rdfs.subPropertyOf,NS.po.stringID),
(NS.gmane.email, NS.rdfs.subPropertyOf,NS.gmane.stringID),
(NS.tw.stringID,NS.rdfs.subPropertyOf,NS.po.stringID),
(NS.tw.email, NS.rdfs.subPropertyOf,NS.tw.stringID),
]
return triples
def fileRDFSStructure():
triples=[
(NS.po.interactionXMLFile, NS.rdfs.subPropertyOf,NS.po.defaultXML), # fb
(NS.po.rdfFile , NS.rdfs.subPropertyOf,NS.po.defaultXML), # twitter, gmane
(NS.po.friendshipXMLFile , NS.rdfs.subPropertyOf,NS.po.defaultXML), # fb
]
return triples
def graphRDFStructure():
triples=[
(NS.po.MetaNamedGraph, NS.rdfs.subClassOf,NS.po.NamedGraph),
(NS.po.TranslationNamedGraph, NS.rdfs.subClassOf, NS.po.NamedGraph),
(NS.po.metaGraph , NS.rdfs.subPropertyOf,NS.po.namedGraph), # fb
(NS.po.metaGraph , NS.rdfs.range,NS.po.MetaNamedGraph), # fb
(NS.po.translationGraph , NS.rdfs.subPropertyOf,NS.po.namedGraph), # fb
(NS.po.translationGraph , NS.rdfs.range,NS.po.TranslationNamedGraph), # fb
]
return triples
def messageRDFSStructure():
triples=[
(NS.gmane.Message,NS.rdfs.subClassOf,NS.po.Message),
(NS.tw.Message,NS.rdfs.subClassOf,NS.po.Message),
(NS.po.Message,NS.rdfs.subClassOf,NS.po.InteractionInstance),
             ]
    return triples
def interactionRDFSStructure():
triples=[
(NS.facebook.Interaction,NS.rdfs.subClassOf,NS.po.InteractionInstance),
(NS.gmane.Response,NS.rdfs.subClassOf,NS.po.InteractionInstance),
(NS.gmane.Retweet,NS.rdfs.subClassOf,NS.po.InteractionInstance),
(NS.facebook.nInterations, NS.rdfs.subPropertyOf,NS.facebook.nRelations),
]
return triples
def friendshipRDFSStructure():
triples=[
(NS.facebook.friendOf,NS.rdfs.subPropertyOf,NS.po.friendOf),
(NS.participa.friendOf,NS.rdfs.subPropertyOf,NS.po.friendOf),
(NS.facebook.nFriendships, NS.rdfs.subPropertyOf,NS.facebook.nRelations),
]
return triples
def friendshipOWLStructure():
triples=[
(NS.facebook.friendOf,a,NS.owl.SymmetricProperty),
]
return triples
def participantRelationRDFStructure():
triples=[
(NS.facebook.nRelations, NS.rdfs.subPropertyOf,NS.po.nRelations),
]
triples+=friendshipRDFSStructure()+interactionRDFSStructure()
return triples
def anonymizationRDFSStructure():
triples=[
(NS.facebook.anonymized, NS.rdfs.subPropertyOf,NS.po.anonymized),
(NS.facebook.friendshipsAnonymized, NS.rdfs.subPropertyOf,NS.facebook.anonymized),
(NS.facebook.interactionssAnonymized, NS.rdfs.subPropertyOf,NS.facebook.anonymized),
]
return triples
def todo():
    todo="""type of relation retrieval: 1, 2 or 3
labels equivalence: irc, etc
date equivalence
interaction/relation uris equivalence
textual content equivalence
if text is available"""
return todo
|
gpl-3.0
| -3,400,617,558,957,189,000 | 6,081,114,792,688,575,000 | 40.21547 | 111 | 0.659651 | false |
debsankha/bedtime-programming
|
ls222/visual-lotka.py
|
1
|
5120
|
#!/usr/bin/env python
from math import *
import thread
import random
import time
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import commands
import matplotlib.pyplot
import visual  # VPython: provides visual.sphere and visual.color used by the animation code below
class rodent:
def __init__(self):
self.time_from_last_childbirth=0
class felix:
def __init__(self):
self.size=0
self.is_virgin=1
self.reproduction_gap=0
self.time_from_last_childbirth=0
self.age=0
# print 'painted'
class gui_display:
def __init__(self):
self.gladefile='./lvshort.glade'
self.wTree = gtk.glade.XML(self.gladefile)
dic={"on_start_clicked":self.dynamics,"on_mainwin_destroy":gtk.main_quit}
self.wTree.signal_autoconnect(dic)
self.wTree.get_widget("mainwin").show()
self.wTree.get_widget("image").set_from_file("./start.png")
def visualize(self,catn,mousen):
# while True:
num=40
size=10
catno=catn*num**2/(catn+mousen)
cats=random.sample(range(num**2),catno)
for i in range(num**2):
if i in cats:
self.dic[i].color=visual.color.red
else :
self.dic[i].color=visual.color.green
def dynamics(self,*args,**kwargs):
self.wTree.get_widget("image").set_from_file("./wait.png")
print 'dynamics started'
mouse_size=20 #ind parameter
cat_mature_size=60 #ind parameter
# catch_rate=5*10**-4 #parameter
# cat_efficiency=0.8 #parameter
# a=0.2 #will get from slider
# c=0.2 #will get from slider
cat_catch_rate=self.wTree.get_widget("catchrate").get_value()*10**-4 #parameter
cat_efficiency=self.wTree.get_widget("efficiency").get_value() #parameter
a=self.wTree.get_widget("a").get_value() #parameter
c=self.wTree.get_widget("c").get_value() #parameter
mouse_no=1000
cat_no=1000
t=0
tmax=200
dt=1
timeli=[]
miceli=[]
catli=[]
mice=[rodent() for i in range(mouse_no)]
cats=[felix() for i in range(cat_no)]
catn=len(cats)
mousen=len(mice)
self.dic={}
num=40
size=10
catno=catn*num**2/(catn+mousen)
disp_cats=random.sample(range(num**2),catno)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
for i in range(num**2):
coords=((i%num)*size*2-num*size,(i/num)*size*2-num*size)
if i in disp_cats:
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.red)
else :
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.green)
print self.dic
catn=len(cats)
mousen=len(mice)
data=open('tempdata.dat','w')
timestart=time.time()
while (len(mice)>0 or len(cats)>0) and t<tmax and (time.time()-timestart)<60:
# print time.time()-timestart
catn=len(cats)
mousen=len(mice)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
# self.visualize(catn,mousen)
thread.start_new_thread(self.visualize,(catn,mousen))
for mouse in mice:
if mouse.time_from_last_childbirth>=1/a:
mouse.time_from_last_childbirth=0
mice.append(rodent())
mouse.time_from_last_childbirth+=dt
ind=0
while ind<len(cats):
cat=cats[ind]
cat.age+=dt
num=cat_catch_rate*dt*len(mice)
for i in range(int(num)):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
if (num-int(num))>random.uniform(0,1):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
if cat.size>cat_mature_size:
if cat.is_virgin:
cat.is_virgin=0
cat.reproduction_gap=cat.age
cats.append(felix())
else :
if cat.time_from_last_childbirth>cat.reproduction_gap:
cats.append(felix())
cat.time_from_last_childbirth=0
if cat.is_virgin==0:
cat.time_from_last_childbirth+=dt
if len(cats)>0:
if c*dt*2*atan(0.05*len(cats))/pi>random.uniform(0,1):
cats.pop(ind)
else :
ind+=1
else :
ind+=1
timeli.append(t)
miceli.append(len(mice))
catli.append(len(cats))
print t,'\t',len(mice),'\t',len(cats)
print >> data, t,'\t',len(mice),'\t',len(cats)
t+=dt
data.close()
upper_limit=1.2*len(mice)
pltfile=open('lv.plt','w')
print >> pltfile,"""se te png
se o "/tmp/lv.png"
unse ke
#se yrange [0:%f]
se xl "Time"
se yl "Number of Prey/Predator"
p 'tempdata.dat' u 1:2 w l,'tempdata.dat' u 1:3 w l
"""%upper_limit
pltfile.close()
commands.getoutput('gnuplot lv.plt')
self.wTree.get_widget("image").set_from_file("/tmp/lv.png")
print 'dynamics ended'
reload(matplotlib.pyplot)
matplotlib.pyplot.plot(timeli,catli,'g-')#timeli,catli,'r-')
matplotlib.pyplot.xlabel("Time")
matplotlib.pyplot.ylabel("Number of mice and cats")
matplotlib.pyplot.show()
gui=gui_display()
gtk.main()
#dynamics()
#import matplotlib.pyplot as plt
#plt.plot(timeli,miceli,'go',timeli,catli,'ro')
#plt.show()
|
gpl-3.0
| -9,037,647,909,445,171,000 | -1,032,744,407,012,818,000 | 24.858586 | 96 | 0.614844 | false |
freedomtan/tensorflow
|
tensorflow/python/ops/confusion_matrix.py
|
14
|
10762
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
def remove_squeezable_dimensions(
labels, predictions, expected_rank_diff=0, name=None):
"""Squeeze last dim if ranks differ from expected by exactly 1.
In the common case where we expect shapes to match, `expected_rank_diff`
defaults to 0, and we squeeze the last dimension of the larger rank if they
differ by 1.
But, for example, if `labels` contains class IDs and `predictions` contains 1
probability per class, we expect `predictions` to have 1 more dimension than
`labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
`labels` if `rank(predictions) - rank(labels) == 0`, and
`predictions` if `rank(predictions) - rank(labels) == 2`.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
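  For example (an illustrative sketch):
  ```python
  # labels has shape [2]; predictions has shape [2, 1]. The ranks differ by 1
  # and the trailing dimension is 1, so predictions is squeezed to shape [2].
  labels, predictions = remove_squeezable_dimensions(
      tf.constant([1, 0]), tf.constant([[1], [0]]))
  ```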
Args:
labels: Label values, a `Tensor` whose dimensions match `predictions`.
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
name: Name of the op.
Returns:
Tuple of `labels` and `predictions`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[labels, predictions]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if (rank_diff == expected_rank_diff + 1 and
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = array_ops.squeeze(predictions, [-1])
elif (rank_diff == expected_rank_diff - 1 and
labels_shape.dims[-1].is_compatible_with(1)):
labels = array_ops.squeeze(labels, [-1])
return labels, predictions
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(expected_rank_diff + 1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(expected_rank_diff - 1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return labels, predictions
@tf_export('math.confusion_matrix', v1=[])
@dispatch.add_dispatch_support
def confusion_matrix(labels,
predictions,
num_classes=None,
weights=None,
dtype=dtypes.int32,
name=None):
"""Computes the confusion matrix from predictions and labels.
The matrix columns represent the prediction labels and the rows represent the
real labels. The confusion matrix is always a 2-D array of shape `[n, n]`,
where `n` is the number of valid labels for a given classification task. Both
prediction and labels must be 1-D arrays of the same shape in order for this
function to work.
If `num_classes` is `None`, then `num_classes` will be set to one plus the
maximum value in either predictions or labels. Class labels are expected to
start at 0. For example, if `num_classes` is 3, then the possible labels
would be `[0, 1, 2]`.
If `weights` is not `None`, then each prediction contributes its
corresponding weight to the total value of the confusion matrix cell.
For example:
```python
tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
[[0 0 0 0 0]
[0 0 1 0 0]
[0 0 1 0 0]
[0 0 0 0 0]
[0 0 0 0 1]]
```
Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
resulting in a 5x5 confusion matrix.
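  With `weights` supplied, each prediction adds its weight to the matching
  cell rather than 1, for example:
  ```python
  tf.math.confusion_matrix([1, 2, 4], [2, 2, 4], weights=[1, 2, 3]) ==>
      [[0 0 0 0 0]
       [0 0 1 0 0]
       [0 0 2 0 0]
       [0 0 0 0 0]
       [0 0 0 0 3]]
  ```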
Args:
labels: 1-D `Tensor` of real labels for the classification task.
predictions: 1-D `Tensor` of predictions for a given classification.
num_classes: The possible number of labels the classification task can
have. If this value is not provided, it will be calculated
using both predictions and labels array.
weights: An optional `Tensor` whose shape matches `predictions`.
dtype: Data type of the confusion matrix.
name: Scope name.
Returns:
A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion
matrix, where `n` is the number of possible labels in the classification
task.
Raises:
ValueError: If both predictions and labels are not 1-D vectors and have
mismatched shapes, or if `weights` is not `None` and its shape doesn't
match `predictions`.
"""
with ops.name_scope(name, 'confusion_matrix',
(predictions, labels, num_classes, weights)) as name:
labels, predictions = remove_squeezable_dimensions(
ops.convert_to_tensor(labels, name='labels'),
ops.convert_to_tensor(
predictions, name='predictions'))
predictions = math_ops.cast(predictions, dtypes.int64)
labels = math_ops.cast(labels, dtypes.int64)
# Sanity checks - underflow or overflow can cause memory corruption.
labels = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(
labels, message='`labels` contains negative values')],
labels)
predictions = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(
predictions, message='`predictions` contains negative values')],
predictions)
if num_classes is None:
num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
math_ops.reduce_max(labels)) + 1
else:
num_classes_int64 = math_ops.cast(num_classes, dtypes.int64)
labels = control_flow_ops.with_dependencies(
[check_ops.assert_less(
labels, num_classes_int64, message='`labels` out of bound')],
labels)
predictions = control_flow_ops.with_dependencies(
[check_ops.assert_less(
predictions, num_classes_int64,
message='`predictions` out of bound')],
predictions)
if weights is not None:
weights = ops.convert_to_tensor(weights, name='weights')
predictions.get_shape().assert_is_compatible_with(weights.get_shape())
weights = math_ops.cast(weights, dtype)
shape = array_ops.stack([num_classes, num_classes])
indices = array_ops.stack([labels, predictions], axis=1)
values = (array_ops.ones_like(predictions, dtype)
if weights is None else weights)
return array_ops.scatter_nd(
indices=indices,
updates=values,
shape=math_ops.cast(shape, dtypes.int64))
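# Hedged usage sketch (not part of the original module), mirroring the docstring
# example above: with `num_classes` left as None the result is a 5x5 matrix here.
# The helper name below is hypothetical.
def _example_confusion_matrix():
  labels = ops.convert_to_tensor([1, 2, 4])
  predictions = ops.convert_to_tensor([2, 2, 4])
  return confusion_matrix(labels, predictions)  # 5x5 matrix as in the docstring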
@tf_export(v1=['math.confusion_matrix', 'confusion_matrix'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('confusion_matrix', 'train.confusion_matrix')
def confusion_matrix_v1(labels,
predictions,
num_classes=None,
dtype=dtypes.int32,
name=None,
weights=None):
"""Computes the confusion matrix from predictions and labels.
The matrix columns represent the prediction labels and the rows represent the
real labels. The confusion matrix is always a 2-D array of shape `[n, n]`,
where `n` is the number of valid labels for a given classification task. Both
  predictions and labels must be 1-D arrays of the same shape in order for this
function to work.
If `num_classes` is `None`, then `num_classes` will be set to one plus the
maximum value in either predictions or labels. Class labels are expected to
start at 0. For example, if `num_classes` is 3, then the possible labels
would be `[0, 1, 2]`.
If `weights` is not `None`, then each prediction contributes its
corresponding weight to the total value of the confusion matrix cell.
For example:
```python
tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
[[0 0 0 0 0]
[0 0 1 0 0]
[0 0 1 0 0]
[0 0 0 0 0]
[0 0 0 0 1]]
```
Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
resulting in a 5x5 confusion matrix.
Args:
labels: 1-D `Tensor` of real labels for the classification task.
predictions: 1-D `Tensor` of predictions for a given classification.
num_classes: The possible number of labels the classification task can have.
      If this value is not provided, it will be calculated using both
      the predictions and labels arrays.
dtype: Data type of the confusion matrix.
name: Scope name.
weights: An optional `Tensor` whose shape matches `predictions`.
Returns:
A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion
matrix, where `n` is the number of possible labels in the classification
task.
Raises:
ValueError: If both predictions and labels are not 1-D vectors and have
mismatched shapes, or if `weights` is not `None` and its shape doesn't
match `predictions`.
"""
return confusion_matrix(labels, predictions, num_classes, weights, dtype,
name)
|
apache-2.0
| 4,615,457,542,390,306,000 | 3,834,811,768,112,707,000 | 40.233716 | 80 | 0.666326 | false |
kaiyou/docker-py
|
tests/unit/errors_test.py
|
2
|
3097
|
import unittest
import requests
from docker.errors import (APIError, DockerException,
create_unexpected_kwargs_error)
class APIErrorTest(unittest.TestCase):
def test_api_error_is_caught_by_dockerexception(self):
try:
raise APIError("this should be caught by DockerException")
except DockerException:
pass
def test_status_code_200(self):
"""The status_code property is present with 200 response."""
resp = requests.Response()
resp.status_code = 200
err = APIError('', response=resp)
assert err.status_code == 200
def test_status_code_400(self):
"""The status_code property is present with 400 response."""
resp = requests.Response()
resp.status_code = 400
err = APIError('', response=resp)
assert err.status_code == 400
def test_status_code_500(self):
"""The status_code property is present with 500 response."""
resp = requests.Response()
resp.status_code = 500
err = APIError('', response=resp)
assert err.status_code == 500
def test_is_server_error_200(self):
"""Report not server error on 200 response."""
resp = requests.Response()
resp.status_code = 200
err = APIError('', response=resp)
assert err.is_server_error() is False
def test_is_server_error_300(self):
"""Report not server error on 300 response."""
resp = requests.Response()
resp.status_code = 300
err = APIError('', response=resp)
assert err.is_server_error() is False
def test_is_server_error_400(self):
"""Report not server error on 400 response."""
resp = requests.Response()
resp.status_code = 400
err = APIError('', response=resp)
assert err.is_server_error() is False
def test_is_server_error_500(self):
"""Report server error on 500 response."""
resp = requests.Response()
resp.status_code = 500
err = APIError('', response=resp)
assert err.is_server_error() is True
def test_is_client_error_500(self):
"""Report not client error on 500 response."""
resp = requests.Response()
resp.status_code = 500
err = APIError('', response=resp)
assert err.is_client_error() is False
def test_is_client_error_400(self):
"""Report client error on 400 response."""
resp = requests.Response()
resp.status_code = 400
err = APIError('', response=resp)
assert err.is_client_error() is True
class CreateUnexpectedKwargsErrorTest(unittest.TestCase):
def test_create_unexpected_kwargs_error_single(self):
e = create_unexpected_kwargs_error('f', {'foo': 'bar'})
assert str(e) == "f() got an unexpected keyword argument 'foo'"
def test_create_unexpected_kwargs_error_multiple(self):
e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'})
assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'"
|
apache-2.0
| -5,584,371,200,627,834,000 | 7,839,992,221,485,463,000 | 34.597701 | 78 | 0.61382 | false |
blaze/distributed
|
distributed/protocol/tests/test_collection_cuda.py
|
1
|
2448
|
import pytest
from distributed.protocol import serialize, deserialize
from dask.dataframe.utils import assert_eq
import pandas as pd
@pytest.mark.parametrize("collection", [tuple, dict])
@pytest.mark.parametrize("y,y_serializer", [(50, "cuda"), (None, "pickle")])
def test_serialize_cupy(collection, y, y_serializer):
cupy = pytest.importorskip("cupy")
x = cupy.arange(100)
if y is not None:
y = cupy.arange(y)
if issubclass(collection, dict):
header, frames = serialize(
{"x": x, "y": y}, serializers=("cuda", "dask", "pickle")
)
else:
header, frames = serialize((x, y), serializers=("cuda", "dask", "pickle"))
t = deserialize(header, frames, deserializers=("cuda", "dask", "pickle", "error"))
assert header["is-collection"] is True
sub_headers = header["sub-headers"]
assert sub_headers[0]["serializer"] == "cuda"
assert sub_headers[1]["serializer"] == y_serializer
assert isinstance(t, collection)
assert ((t["x"] if isinstance(t, dict) else t[0]) == x).all()
if y is None:
assert (t["y"] if isinstance(t, dict) else t[1]) is None
else:
assert ((t["y"] if isinstance(t, dict) else t[1]) == y).all()
@pytest.mark.parametrize("collection", [tuple, dict])
@pytest.mark.parametrize(
"df2,df2_serializer",
[(pd.DataFrame({"C": [3, 4, 5], "D": [2.5, 3.5, 4.5]}), "cuda"), (None, "pickle")],
)
def test_serialize_pandas_pandas(collection, df2, df2_serializer):
cudf = pytest.importorskip("cudf")
df1 = cudf.DataFrame({"A": [1, 2, None], "B": [1.0, 2.0, None]})
if df2 is not None:
df2 = cudf.from_pandas(df2)
if issubclass(collection, dict):
header, frames = serialize(
{"df1": df1, "df2": df2}, serializers=("cuda", "dask", "pickle")
)
else:
header, frames = serialize((df1, df2), serializers=("cuda", "dask", "pickle"))
t = deserialize(header, frames, deserializers=("cuda", "dask", "pickle"))
assert header["is-collection"] is True
sub_headers = header["sub-headers"]
assert sub_headers[0]["serializer"] == "cuda"
assert sub_headers[1]["serializer"] == df2_serializer
assert isinstance(t, collection)
assert_eq(t["df1"] if isinstance(t, dict) else t[0], df1)
if df2 is None:
assert (t["df2"] if isinstance(t, dict) else t[1]) is None
else:
assert_eq(t["df2"] if isinstance(t, dict) else t[1], df2)
|
bsd-3-clause
| -2,612,189,011,257,522,700 | 3,728,268,588,896,349,700 | 36.090909 | 87 | 0.611928 | false |
LTD-Beget/tornado
|
tornado/util.py
|
2
|
13519
|
"""Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import os
import sys
import zlib
try:
xrange # py2
except NameError:
xrange = range # py3
# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
# The two functions have compatible interfaces for the parts we need.
try:
from inspect import getfullargspec as getargspec # py3
except ImportError:
from inspect import getargspec # py2
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
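# Hedged illustration (not part of the original module): attribute-style access on
# an ObjectDict is equivalent to item access. The helper name is hypothetical.
def _example_object_dict():
    d = ObjectDict(port=8888)
    d.debug = True  # same as d['debug'] = True
    return d.port, d['debug']  # (8888, True)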
class GzipDecompressor(object):
"""Streaming gzip decompressor.
The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
"""
def __init__(self):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value, max_length=None):
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length)
@property
def unconsumed_tail(self):
"""Returns the unconsumed portion left over
"""
return self.decompressobj.unconsumed_tail
def flush(self):
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
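# Hedged usage sketch (not part of the original module): round-tripping a small gzip
# payload through GzipDecompressor using only the standard library. An explicit
# max_length is passed for illustration; the helper name is hypothetical.
def _example_gzip_decompressor():
    import gzip
    import io
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode="wb") as f:
        f.write(b"hello world")
    decompressor = GzipDecompressor()
    data = decompressor.decompress(buf.getvalue(), 1024)
    data += decompressor.flush()  # also verifies the gzip trailer/checksum
    return data  # b"hello world"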
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if not isinstance(b'', type('')):
def u(s):
return s
unicode_type = str
basestring_type = str
else:
def u(s):
return s.decode('unicode_escape')
# These names don't exist in py3, so use noqa comments to disable
# warnings in flake8.
unicode_type = unicode # noqa
basestring_type = basestring # noqa
def import_object(name):
"""Imports an object by name.
import_object('x') is equivalent to 'import x'.
import_object('x.y.z') is equivalent to 'from x.y import z'.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if isinstance(name, unicode_type) and str is not unicode_type:
# On python 2 a byte string is required.
name = name.encode('utf-8')
if name.count('.') == 0:
return __import__(name, None, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
# Deprecated alias that was used before we dropped py25 support.
# Left here in case anyone outside Tornado is using it.
bytes_type = bytes
if sys.version_info > (3,):
exec("""
def raise_exc_info(exc_info):
raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
if isinstance(code, basestring):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec code in glob, loc
""")
def errno_from_exception(e):
"""Provides the errno from an Exception object.
    There are cases where the errno attribute is not set, so we pull
    the errno out of the args; but if someone instantiates an Exception
    without any args, you will get a tuple error. So this function
    abstracts all that behavior to give you a safe way to get the
    errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
class Configurable(object):
"""Base class for configurable interfaces.
A configurable interface is an (abstract) class whose constructor
acts as a factory function for one of its implementation subclasses.
The implementation subclass as well as optional keyword arguments to
its initializer can be set globally at runtime with `configure`.
By using the constructor as the factory method, the interface
looks like a normal class, `isinstance` works as usual, etc. This
pattern is most useful when the choice of implementation is likely
to be a global decision (e.g. when `~select.epoll` is available,
always use it instead of `~select.select`), or when a
previously-monolithic class has been split into specialized
subclasses.
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
"""
__impl_class = None
__impl_kwargs = None
def __new__(cls, *args, **kwargs):
base = cls.configurable_base()
init_kwargs = {}
if cls is base:
impl = cls.configured_class()
if base.__impl_kwargs:
init_kwargs.update(base.__impl_kwargs)
else:
impl = cls
init_kwargs.update(kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
# here too.
instance.initialize(*args, **init_kwargs)
return instance
@classmethod
def configurable_base(cls):
"""Returns the base class of a configurable hierarchy.
        This will normally return the class in which it is defined
        (which is *not* necessarily the same as the ``cls`` classmethod parameter).
"""
raise NotImplementedError()
@classmethod
def configurable_default(cls):
"""Returns the implementation class to be used if none is configured."""
raise NotImplementedError()
def initialize(self):
"""Initialize a `Configurable` subclass instance.
Configurable classes should use `initialize` instead of ``__init__``.
.. versionchanged:: 4.2
Now accepts positional arguments in addition to keyword arguments.
"""
@classmethod
def configure(cls, impl, **kwargs):
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (unicode_type, bytes)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs
@classmethod
def configured_class(cls):
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
@classmethod
def _save_configuration(cls):
base = cls.configurable_base()
return (base.__impl_class, base.__impl_kwargs)
@classmethod
def _restore_configuration(cls, saved):
base = cls.configurable_base()
base.__impl_class = saved[0]
base.__impl_kwargs = saved[1]
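# Hedged sketch (not part of the original module): a minimal Configurable hierarchy.
# All names below are hypothetical and exist only for illustration.
class _ExampleResolverBase(Configurable):
    @classmethod
    def configurable_base(cls):
        return _ExampleResolverBase
    @classmethod
    def configurable_default(cls):
        return _ExampleBlockingResolver
class _ExampleBlockingResolver(_ExampleResolverBase):
    def initialize(self, timeout=None):
        self.timeout = timeout
# _ExampleResolverBase.configure(_ExampleBlockingResolver, timeout=5)
# resolver = _ExampleResolverBase()  # instantiates _ExampleBlockingResolver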
class ArgReplacer(object):
"""Replaces one value in an ``args, kwargs`` pair.
Inspects the function signature to find an argument by name
whether it is passed by position or keyword. For use in decorators
and similar wrappers.
"""
def __init__(self, func, name):
self.name = name
try:
self.arg_pos = self._getargnames(func).index(name)
except ValueError:
# Not a positional parameter
self.arg_pos = None
def _getargnames(self, func):
try:
return getargspec(func).args
except TypeError:
if hasattr(func, 'func_code'):
# Cython-generated code has all the attributes needed
# by inspect.getargspec (when the
# @cython.binding(True) directive is used), but the
# inspect module only works with ordinary functions.
# Inline the portion of getargspec that we need here.
code = func.func_code
return code.co_varnames[:code.co_argcount]
raise
def get_old_value(self, args, kwargs, default=None):
"""Returns the old value of the named argument without replacing it.
Returns ``default`` if the argument is not present.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
return args[self.arg_pos]
else:
return kwargs.get(self.name, default)
def replace(self, new_value, args, kwargs):
"""Replace the named argument in ``args, kwargs`` with ``new_value``.
Returns ``(old_value, args, kwargs)``. The returned ``args`` and
``kwargs`` objects may not be the same as the input objects, or
the input objects may be mutated.
If the named argument was not found, ``new_value`` will be added
to ``kwargs`` and None will be returned as ``old_value``.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
# The arg to replace is passed positionally
old_value = args[self.arg_pos]
args = list(args) # *args is normally a tuple
args[self.arg_pos] = new_value
else:
# The arg to replace is either omitted or passed by keyword.
old_value = kwargs.get(self.name)
kwargs[self.name] = new_value
return old_value, args, kwargs
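# Hedged usage sketch (not part of the original module): replacing a ``callback``
# argument whether or not it was passed. The helper names are hypothetical.
def _example_arg_replacer():
    def fetch(url, callback=None):
        return url, callback
    replacer = ArgReplacer(fetch, 'callback')
    old, args, kwargs = replacer.replace('new_cb', ('http://example.com',), {})
    # old is None because callback was omitted; kwargs now holds the new value.
    return fetch(*args, **kwargs)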
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask = array.array("B", mask)
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ mask[i % 4]
if hasattr(unmasked, 'tobytes'):
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked.tobytes()
else:
return unmasked.tostring()
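# Hedged illustration (not part of the original module): masking is an involution,
# so applying the same 4-byte mask twice restores the payload. Name is hypothetical.
def _example_websocket_mask():
    mask = b"\x01\x02\x03\x04"
    data = b"hello"
    masked = _websocket_mask_python(mask, data)
    return _websocket_mask_python(mask, masked) == data  # True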
if (os.environ.get('TORNADO_NO_EXTENSION') or
os.environ.get('TORNADO_EXTENSION') == '0'):
# These environment variables exist to make it easier to do performance
# comparisons; they are not guaranteed to remain supported in the future.
_websocket_mask = _websocket_mask_python
else:
try:
from tornado.speedups import websocket_mask as _websocket_mask
except ImportError:
if os.environ.get('TORNADO_EXTENSION') == '1':
raise
_websocket_mask = _websocket_mask_python
def doctests():
import doctest
return doctest.DocTestSuite()
|
apache-2.0
| 5,607,329,887,341,188,000 | -5,569,714,570,594,332,000 | 34.023316 | 97 | 0.645092 | false |
cs243iitg/vehicle-webapp
|
webapp/vms/forms.py
|
1
|
15620
|
from vms import models
from django.contrib.auth.models import User
from django import forms
from django.forms.extras.widgets import SelectDateWidget
from django.contrib.admin.widgets import AdminSplitDateTime
from django.utils.translation import ugettext_lazy as _
from datetimewidget.widgets import DateTimeWidget, DateWidget, DateTimeInput, TimeInput
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit
from crispy_forms.bootstrap import TabHolder, Tab, Div, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, InlineCheckboxes
from crispy_forms.bootstrap import Accordion, AccordionGroup
from django.contrib.auth import forms as UserForms
from django.core.validators import RegexValidator
from datetime import datetime
from bootstrap3_datetime.widgets import DateTimePicker
class DocumentForm(forms.Form):
docfile = forms.FileField(
label='Select a file'
)
class StudentCycleForm(forms.ModelForm):
class Meta:
model = models.StudentCycle
exclude = ('user','cycle_pass_no')
def __init__(self, *args, **kwargs):
super(StudentCycleForm, self).__init__(*args, **kwargs)
for index, field in enumerate(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
'tabindex': index+1,
})
class BusTimingForm(forms.ModelForm):
from_time = forms.DateTimeField(required=True, widget=DateTimePicker(options={"format": "DD-MM-YYYY HH:mm", "pickSeconds":True}))
class Meta:
model = models.BusTiming
fields = ['bus_route', 'from_time', 'bus_no', 'starting_point', 'ending_point', 'availability','working_day']
# widgets = {
# 'from_time': forms.TimeInput(format='%H:%M'),
# }
def __init__(self, *args, **kwargs):
super(BusTimingForm, self).__init__(*args, **kwargs)
for index, field in enumerate(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
'tabindex': index+1,
})
# self.fields['from_time'].widget = TimeInput(attrs={
# 'class':'form-control',
# 'tabindex':index+1,
# 'placeholder': 'HH:MM',
# })
class SuspiciousVehicleForm(forms.ModelForm):
"""
User form for Reporting Suspicious Vehicle
"""
class Meta:
model = models.SuspiciousVehicle
exclude = ('reporter',)
widgets = {
'remarks': forms.Textarea(attrs={'rows':6}),
}
labels = {
'vehicle_image': _('Vehicle Photo'),
'vehicle_number': _('Vehicle Number'),
'vehicle_type': _('Vehicle Type'),
'vehicle_model': _('Vehicle Model'),
}
def __init__(self, *args, **kwargs):
super(SuspiciousVehicleForm, self).__init__(*args, **kwargs)
for index, field in enumerate(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
'tabindex': index+1,
})
class PersonPassForm(forms.ModelForm):
"""
Admin form for Blocking Passes
"""
class Meta:
model = models.PersonPass
exclude = ('is_blocked','reason')
widgets = {
'expiry_date': SelectDateWidget(years=range(2000, 2030)),#(usel10n = True, bootstrap_version=3,),
'issue_date': SelectDateWidget(years=range(2000, 2030)),#(usel10n = True, bootstrap_version=3,),
}
labels = {
'user_photo': _('Your photo'),
'old_card_reference': _('Old Card Number'),
'age': _('Age'),
'pass_number': _('Pass Number'),
'name': _('Name'),
'identified_by': _('Office'),
'work_area': _('Work Area'),
'working_time': _('Working Time'),
'nature_of_work': _('Job'),
}
class TheftForm(forms.ModelForm):
"""
User form for reporting theft
"""
theft_time = forms.DateTimeField(required=True, widget=DateTimePicker(options={"format": "DD-MM-YYYY HH:mm", "pickSeconds":True}))
class Meta:
model = models.TheftReport
exclude = ('reporter', 'status','stud_vehicle','emp_vehicle')
widgets = {
# 'theft_time': DateTimeWidget(usel10n = True, bootstrap_version=3),
# 'theft_time':DateTimeInput(format="%d-%m-%Y %H:%M"),
'remarks': forms.Textarea(attrs={'rows':6}),
}
def __init__(self, *args, **kwargs):
super(TheftForm, self).__init__(*args, **kwargs)
for index, field in enumerate(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
'tabindex': index+1,
})
self.fields['theft_time'].widget = DateTimeInput(attrs={
'class':'form-control',
'tabindex':index+1,
'placeholder': 'DD-MM-YYYY hh:mm',
})
class StudentVehicleForm(forms.ModelForm):
"""
Student form for registering vehicle
"""
# date_of_birth = forms.DateTimeField(required=True, widget=DateTimePicker(options={"format": "DD-MM-YYYY", "pickTime":False}))
class Meta:
model = models.StudentVehicle
exclude = ('registered_with_security_section', 'user', 'issue_date', 'expiry_date')
dateOptions = {
'startView': 4,
}
widgets = {
# 'date_of_birth':DateTimePicker(options={"format": "DD-MM-YYYY", "pickTime":False}),
'date_of_birth': SelectDateWidget(years=range(1950, datetime.now().year)),#(usel10n = True, bootstrap_version=3, options = dateOptions),
'insurance_valid_upto': SelectDateWidget(years=range(datetime.now().year, 2035)), #(usel10n = True, bootstrap_version=3, options = dateOptions),
'driving_license_issue_date': SelectDateWidget(years=range(1950, datetime.now().year)), #(usel10n = True, bootstrap_version=3, options = dateOptions),
'driving_license_expiry_date': SelectDateWidget(years=range(datetime.now().year, 2035)), #(usel10n = True, bootstrap_version=3, options = dateOptions),
'remarks': forms.Textarea(attrs={'rows':6}),
'address_of_communication': forms.Textarea(attrs={'rows':4}),
'permanent_address': forms.Textarea(attrs={'rows':4}),
'declaration': forms.Textarea(attrs={'rows':6,
'readonly':True,
'style':'resize:none;',}),
}
labels = {
'user_photo': _('Your photo'),
'address_of_communication': _('Address'),
'address_of_communication_district': _('District'),
'address_of_communication_state': _('State'),
'address_of_communication_pincode': _('Pincode'),
'permanent_address': _('Address'),
'permanent_address_district': _('District'),
'permanent_address_state': _('State'),
'permanent_address_pincode': _('Pincode'),
'parents_contact_no': _('Contact number'),
'parents_emailid': _('Email ID'),
'vehicle_registration_number': _('Registration Number'),
'driving_license_number': _('License number'),
'driving_license_issue_date': _('Issue date'),
'driving_license_expiry_date': _('Expiry Date'),
'driving_license': _('Scanned copy'),
}
def __init__(self, *args, **kwargs):
super(StudentVehicleForm, self).__init__(*args, **kwargs)
for index, field in enumerate(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
'tabindex': index+1,
})
for field in self.fields.values():
field.error_messages = {'required':''}
self.helper = FormHelper()
self.helper.form_id = 'id_student_vehicle_form'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2 col-md-offset-1'
self.helper.field_class = 'col-md-4'
self.helper.form_method = 'post'
self.helper.form_action = '/vms/users/submit-vehicle-registration/'
self.helper.layout = Layout(
TabHolder(
Tab('Personal Details',
'name',
'roll_number',
'department',
'programme',
'date_of_birth',
'hostel_name',
'room_number',
'mobile_number',
'user_photo',
'identity_card',
),
Tab('Contact',
Accordion(
AccordionGroup('Address of communication',
'address_of_communication',
'address_of_communication_district',
'address_of_communication_state',
'address_of_communication_pincode',
),
AccordionGroup('Permanent Address',
'permanent_address',
'permanent_address_district',
'permanent_address_state',
'permanent_address_pincode',
),
AccordionGroup('Parent/Guardian Details',
'parents_contact_no',
'parents_emailid',
),
),
),
Tab('Vehicle Details',
'vehicle_registration_number',
#'registered_with_security_section',
'color',
'make_and_model',
'chassis_number',
'engine_number',
'registered_in_the_name_of',
'relation_with_owner',
'vehicle_insurance_no',
'insurance_valid_upto',
'vehicle_registration_card',
'vehicle_insurance',
'vehicle_photo',
),
Tab('Driving License',
'driving_license_number',
'driving_license_issue_date',
'driving_license_expiry_date',
'driving_license',
'declaration'
)
),
ButtonHolder(
Submit('submit', 'Submit',
css_class='btn-primary col-md-offset-5 form-submit')
)
)
class EmployeeVehicleForm(forms.ModelForm):
"""
Employee form for registering vehicle
"""
# date_of_birth=forms.DateField(widget=SelectDateWidget, initial="DD-MM-YYYY")
# insurance_valid_upto = forms.DateField(widget=SelectDateWidget, initial="DD-MM-YYYY")
# driving_license_issue_date = forms.DateField(widget=SelectDateWidget, initial="DD-MM-YYYY")
# driving_license_expiry_date = forms.DateField(widget=SelectDateWidget, initial="DD-MM-YYYY")
class Meta:
model = models.EmployeeVehicle
exclude = ('registered_with_security_section', 'user', 'issue_date', 'expiry_date')
dateOptions = {
'startView': 4,
}
widgets = {
'date_of_birth': SelectDateWidget(years=range(1950, datetime.now().year)), #DateWidget(usel10n = True, bootstrap_version=3,
# options = dateOptions),
'insurance_valid_upto': SelectDateWidget(years=range(datetime.now().year, 2035)), #DateWidget(usel10n = True, bootstrap_version=3,options = dateOptions),
'driving_license_issue_date':SelectDateWidget(years=range(1950, datetime.now().year)), # DateWidget(usel10n = True, bootstrap_version=3, options = dateOptions),
'driving_license_expiry_date': SelectDateWidget(years=range(datetime.now().year, 2035)), #DateWidget(usel10n = True, bootstrap_version=3, options = dateOptions),
'remarks': forms.Textarea(attrs={'rows':6}),
'address_of_communication': forms.Textarea(attrs={'rows':4}),
'permanent_address': forms.Textarea(attrs={'rows':4}),
'declaration': forms.Textarea(attrs={'rows':6,
'readonly':True,
'style':'resize:none;',}),
}
labels = {
'user_photo': _('Your photo'),
'vehicle_registration_number': _('Registration Number'),
'driving_license_number': _('License number'),
'driving_license_issue_date': _('Issue date'),
'driving_license_expiry_date': _('Expiry Date'),
'driving_license': _('Scanned copy'),
}
def __init__(self, *args, **kwargs):
super(EmployeeVehicleForm, self).__init__(*args, **kwargs)
for index, field in enumerate(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
'tabindex': index+1,
})
for field in self.fields.values():
field.error_messages = {'required':''}
self.helper = FormHelper()
self.helper.form_id = 'id_student_vehicle_form'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-md-2 col-md-offset-1'
self.helper.field_class = 'col-md-4'
self.helper.form_method = 'post'
self.helper.form_action = '/vms/users/submit-vehicle-registration/'
self.helper.layout = Layout(
TabHolder(
Tab('Personal Details',
'name',
'employee_no',
'department',
'date_of_birth',
'block_number',
'flat_number',
'mobile_number',
'user_photo',
'identity_card',
'parking_slot_no',
),
Tab('Vehicle Details',
'vehicle_registration_number',
'color',
'make_and_model',
'chassis_number',
'engine_number',
'registered_in_the_name_of',
'vehicle_insurance_no',
'insurance_valid_upto',
'vehicle_registration_card',
'vehicle_insurance',
'vehicle_photo',
),
Tab('Driving License',
'driving_license_number',
'driving_license_issue_date',
'driving_license_expiry_date',
'driving_license',
'declaration'
)
),
ButtonHolder(
Submit('submit', 'Submit',
css_class='btn-primary col-md-offset-5 form-submit')
)
)
|
mit
| 9,195,619,474,028,644,000 | -6,528,842,340,072,076,000 | 42.268698 | 271 | 0.516005 | false |
kernel64/AutobahnPython
|
examples/websocket/streaming/message_based_server.py
|
27
|
1622
|
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import hashlib
from twisted.internet import reactor
from autobahn.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
class MessageBasedHashServerProtocol(WebSocketServerProtocol):
"""
Message-based WebSockets server that computes a SHA-256 for every
message it receives and sends back the computed digest.
"""
def onMessage(self, message, binary):
sha256 = hashlib.sha256()
sha256.update(message)
digest = sha256.hexdigest()
self.sendMessage(digest)
print "Sent digest for message: %s" % digest
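# Hedged illustration (not part of the original example): the digest the server
# sends back can be reproduced client-side with hashlib for verification.
# The helper name below is hypothetical.
def exampleExpectedDigest(message):
   return hashlib.sha256(message).hexdigest()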
if __name__ == '__main__':
factory = WebSocketServerFactory("ws://localhost:9000")
factory.protocol = MessageBasedHashServerProtocol
listenWS(factory)
reactor.run()
|
apache-2.0
| 5,827,530,319,007,656,000 | 4,052,364,387,712,692,000 | 34.863636 | 79 | 0.609741 | false |
ringemup/satchmo
|
satchmo/apps/payment/views/contact.py
|
6
|
5160
|
####################################################################
# First step in the order process - capture all the demographic info
#####################################################################
from django import http
from django.core import urlresolvers
from django.shortcuts import render_to_response
from django.template import RequestContext
from livesettings import config_get_group, config_value
from satchmo_store.contact import CUSTOMER_ID
from satchmo_store.contact.forms import area_choices_for_country
from satchmo_store.contact.models import Contact
from payment.decorators import cart_has_minimum_order
from payment.forms import ContactInfoForm, PaymentContactInfoForm
from satchmo_store.shop.models import Cart, Config, Order
from satchmo_utils.dynamic import lookup_url
from signals_ahoy.signals import form_initialdata
import logging
log = logging.getLogger('satchmo_store.contact.contact')
def authentication_required(request, template='shop/checkout/authentication_required.html'):
return render_to_response(
template, {}, context_instance = RequestContext(request)
)
def contact_info(request, **kwargs):
"""View which collects demographic information from customer."""
#First verify that the cart exists and has items
tempCart = Cart.objects.from_request(request)
if tempCart.numItems == 0:
return render_to_response('shop/checkout/empty_cart.html',
context_instance=RequestContext(request))
if not request.user.is_authenticated() and config_value('SHOP', 'AUTHENTICATION_REQUIRED'):
url = urlresolvers.reverse('satchmo_checkout_auth_required')
thisurl = urlresolvers.reverse('satchmo_checkout-step1')
return http.HttpResponseRedirect(url + "?next=" + thisurl)
init_data = {}
shop = Config.objects.get_current()
if request.user.is_authenticated():
if request.user.email:
init_data['email'] = request.user.email
if request.user.first_name:
init_data['first_name'] = request.user.first_name
if request.user.last_name:
init_data['last_name'] = request.user.last_name
try:
contact = Contact.objects.from_request(request, create=False)
except Contact.DoesNotExist:
contact = None
try:
order = Order.objects.from_request(request)
if order.discount_code:
init_data['discount'] = order.discount_code
except Order.DoesNotExist:
pass
if request.method == "POST":
new_data = request.POST.copy()
if not tempCart.is_shippable:
new_data['copy_address'] = True
form = PaymentContactInfoForm(data=new_data, shop=shop, contact=contact, shippable=tempCart.is_shippable,
initial=init_data, cart=tempCart)
if form.is_valid():
if contact is None and request.user and request.user.is_authenticated():
contact = Contact(user=request.user)
custID = form.save(request, cart=tempCart, contact=contact)
request.session[CUSTOMER_ID] = custID
modulename = new_data['paymentmethod']
if not modulename.startswith('PAYMENT_'):
modulename = 'PAYMENT_' + modulename
paymentmodule = config_get_group(modulename)
url = lookup_url(paymentmodule, 'satchmo_checkout-step2')
return http.HttpResponseRedirect(url)
else:
log.debug("Form errors: %s", form.errors)
else:
if contact:
#If a person has their contact info, make sure we populate it in the form
for item in contact.__dict__.keys():
init_data[item] = getattr(contact,item)
if contact.shipping_address:
for item in contact.shipping_address.__dict__.keys():
init_data["ship_"+item] = getattr(contact.shipping_address,item)
if contact.billing_address:
for item in contact.billing_address.__dict__.keys():
init_data[item] = getattr(contact.billing_address,item)
if contact.primary_phone:
init_data['phone'] = contact.primary_phone.phone
else:
# Allow them to login from this page.
request.session.set_test_cookie()
#Request additional init_data
form_initialdata.send(sender=PaymentContactInfoForm, initial=init_data,
contact=contact, cart=tempCart, shop=shop)
form = PaymentContactInfoForm(
shop=shop,
contact=contact,
shippable=tempCart.is_shippable,
initial=init_data,
cart=tempCart)
if shop.in_country_only:
only_country = shop.sales_country
else:
only_country = None
context = RequestContext(request, {
'form': form,
'country': only_country,
'paymentmethod_ct': len(form.fields['paymentmethod'].choices)
})
return render_to_response('shop/checkout/form.html',
context_instance=context)
contact_info_view = cart_has_minimum_order()(contact_info)
|
bsd-3-clause
| 611,995,513,992,942,000 | 6,164,227,994,304,558,000 | 40.28 | 113 | 0.634302 | false |
gibiansky/tensorflow
|
tensorflow/python/kernel_tests/reduction_ops_test.py
|
21
|
33412
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
def testSimple(self):
with self.test_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.test_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
def testNegAxes(self):
with self.test_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
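# Hedged illustration (not part of the original tests): reduced_shape keeps the rank
# and sets each reduced dimension to 1, which is what the checks above exercise.
# The helper name below is hypothetical.
def _example_reduced_shape():
  return math_ops.reduced_shape([5, 3], axes=[0])  # evaluates to [1, 3]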
class SumReductionTest(test.TestCase):
def _compare(self,
x,
reduction_axes,
keep_dims,
use_gpu=False,
feed_dict=None):
np_ans = x
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.reduce_sum(x, reduction_axes, keep_dims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.arange(1, 6).reshape([5]).astype(np.float32)
self._compareAll(np_arr, [0])
def testFloatReduce2D(self):
# Create a 2D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [0, 1])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [-1])
self._compareAll(np_arr, [-1, -3])
self._compareAll(np_arr, [-1, 1])
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
def testFloatReduce5D(self):
# Create a 5D array of floats and reduce across some dimensions
np_arr = np.arange(0, 840).reshape([2, 3, 5, 7, 4]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
self._compareAll(np_arr, [1, 2, 3, 4])
self._compareAll(np_arr, [0, 1, 2, 3, 4])
# Simple tests for various types.
def testDoubleReduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
def testInt32Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.int32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
def testComplex64Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.complex64)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testComplex128Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.complex128)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [0, 2])
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = array_ops.placeholder(dtypes.float32)
s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
s_known_rank = math_ops.reduce_sum(
c_known_rank, reduction_axes, keep_dims=True)
self.assertEqual(3, s_known_rank.get_shape().ndims)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keep_dims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keep_dims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
# Int64??
def _compareGradient(self, shape, sum_shape, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(shape, sum_shape, reduction_axes[0])
x = np.arange(1.0, 49.0).reshape(shape).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_sum(t, reduction_axes)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, su, sum_shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
self._compareGradient([2, 3, 4, 2], [2, 2], [1, 2])
def testGradient2(self):
self._compareGradient([2, 3, 4, 2], [2, 4, 2], [1])
def testGradient3(self):
self._compareGradient([2, 3, 4, 2], [2, 3, 2], [2])
def testGradient4(self):
self._compareGradient([2, 3, 4, 2], [], None)
def testGradient5(self):
self._compareGradient([2, 3, 4, 2], [3, 4, 2], 0)
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
def testEmptyGradients(self):
with self.test_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
class MeanReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.mean(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
count = 1
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
count *= x.shape[ra]
np_ans /= count
with self.test_session(use_gpu=use_gpu):
tf_ans = math_ops.reduce_mean(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_mean(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = math_ops.reduce_mean(t, [0, 1, 2, 3])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = math_ops.reduce_mean(t, [])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = math_ops.reduce_mean(t, 0)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testEmptyGradients(self):
with self.test_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
class ProdReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_ans = x
if reduction_axes is None:
np_ans = np.prod(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.prod(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session():
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_prod(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False)
self._compare(x, reduction_axes, True)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def _compareGradient(self, x):
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_prod(t, [])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, [2, 3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = math_ops.reduce_prod(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = math_ops.reduce_prod(t, [0, 1, 2, 3])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = math_ops.reduce_prod(t, 0)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, [3, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
# No zeros in input
self._compareGradient(x)
# Zero at beginning
x1 = x.copy()
x1[:, :, 0, :] = 0
self._compareGradient(x1)
# Zero at end
x2 = x.copy()
x2[:, :, -1, :] = 0
self._compareGradient(x2)
# Zero in middle
x3 = x.copy()
x3[:, :, 2, :] = 0
self._compareGradient(x3)
# All zeros
x4 = x.copy()
x4[:, :, :, :] = 0
self._compareGradient(x4)
def testEmptyGradients(self):
with self.test_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_min(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_min(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_max(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_max(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class AllReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_all(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_any(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class CountNonzeroReductionTest(test.TestCase):
def _compare(self,
x,
reduction_axes,
keep_dims,
use_gpu=False,
feed_dict=None):
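# Reference: mark nonzero entries with NumPy, sum them over the requested axes, then compare with tf.count_nonzero.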
np_ans = (x != 0).astype(np.int32)
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.count_nonzero(x, reduction_axes, keep_dims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
def testBoolReduce1D(self):
# Create a 1D array of booleans
np_arr = np.asarray([False, False, True, False, False, True])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)
self._compareAll(np_arr, [0])
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape(
[2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
for dtype in (dtypes.bool,):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
if __name__ == "__main__":
test.main()
|
apache-2.0
| 8,050,667,278,882,248,000 | 4,730,324,922,053,769,000 | 36.499439 | 80 | 0.619807 | false |
nikolas/lettuce
|
tests/integration/lib/Django-1.2.5/django/core/mail/backends/filebased.py
|
394
|
2485
|
"""Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import EmailBackend as ConsoleEmailBackend
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, **kwargs):
self._fname = None
if 'file_path' in kwargs:
self.file_path = kwargs.pop('file_path')
else:
self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, basestring):
raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
self.file_path = os.path.abspath(self.file_path)
# Make sure that self.file_path is a directory if it exists.
if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
raise ImproperlyConfigured('Path for saving email messages exists, but is not a directory: %s' % self.file_path)
# Try to create it, if it does not exist.
elif not os.path.exists(self.file_path):
try:
os.makedirs(self.file_path)
except OSError, err:
raise ImproperlyConfigured('Could not create directory for saving email messages: %s (%s)' % (self.file_path, err))
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs['stream'] = None
super(EmailBackend, self).__init__(*args, **kwargs)
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
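# Lazily open the per-run log file in append mode; return True only when a new stream was created.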
if self.stream is None:
self.stream = open(self._get_filename(), 'a')
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
|
gpl-3.0
| -5,210,912,800,729,551,000 | -6,451,132,043,781,148,000 | 41.118644 | 131 | 0.614889 | false |
tashaxe/Red-DiscordBot
|
lib/youtube_dl/extractor/abc.py
|
24
|
6210
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
js_to_json,
int_or_none,
parse_iso8601,
)
class ABCIE(InfoExtractor):
IE_NAME = 'abc.net.au'
_VALID_URL = r'https?://(?:www\.)?abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
'id': '5868334',
'ext': 'mp4',
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
'skip': 'this video has expired',
}, {
'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
'md5': 'db2a5369238b51f9811ad815b69dc086',
'info_dict': {
'id': 'NvqvPeNZsHU',
'ext': 'mp4',
'upload_date': '20150816',
'uploader': 'ABC News (Australia)',
'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
'uploader_id': 'NewsOnABC',
'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
},
'add_ie': ['Youtube'],
'skip': 'Not accessible from Travis CI server',
}, {
'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
'info_dict': {
'id': '6880080',
'ext': 'mp3',
'title': 'NAB lifts interest rates, following Westpac and CBA',
'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
},
}, {
'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
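# The page embeds media info as inline(Video|Audio|YouTube)Data.push({...}); extract that JSON blob.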
mobj = re.search(
r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
webpage)
if mobj is None:
expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
if expired:
raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
raise ExtractorError('Unable to extract video urls')
urls_info = self._parse_json(
mobj.group('json_data'), video_id, transform_source=js_to_json)
if not isinstance(urls_info, list):
urls_info = [urls_info]
if mobj.group('type') == 'YouTube':
return self.playlist_result([
self.url_result(url_info['url']) for url_info in urls_info])
formats = [{
'url': url_info['url'],
'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
'width': int_or_none(url_info.get('width')),
'height': int_or_none(url_info.get('height')),
'tbr': int_or_none(url_info.get('bitrate')),
'filesize': int_or_none(url_info.get('filesize')),
} for url_info in urls_info]
self._sort_formats(formats)
return {
'id': video_id,
'title': self._og_search_title(webpage),
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}
class ABCIViewIE(InfoExtractor):
IE_NAME = 'abc.net.au:iview'
_VALID_URL = r'https?://iview\.abc\.net\.au/programs/[^/]+/(?P<id>[^/?#]+)'
# ABC iview programs are normally available for 14 days only.
_TESTS = [{
'url': 'http://iview.abc.net.au/programs/diaries-of-a-broken-mind/ZX9735A001S00',
'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',
'info_dict': {
'id': 'ZX9735A001S00',
'ext': 'mp4',
'title': 'Diaries Of A Broken Mind',
'description': 'md5:7de3903874b7a1be279fe6b68718fc9e',
'upload_date': '20161010',
'uploader_id': 'abc2',
'timestamp': 1476064920,
},
'skip': 'Video gone',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_params = self._parse_json(self._search_regex(
r'videoParams\s*=\s*({.+?});', webpage, 'video params'), video_id)
title = video_params.get('title') or video_params['seriesTitle']
stream = next(s for s in video_params['playlist'] if s.get('type') == 'program')
formats = self._extract_akamai_formats(stream['hds-unmetered'], video_id)
self._sort_formats(formats)
subtitles = {}
src_vtt = stream.get('captions', {}).get('src-vtt')
if src_vtt:
subtitles['en'] = [{
'url': src_vtt,
'ext': 'vtt',
}]
return {
'id': video_id,
'title': title,
'description': self._html_search_meta(['og:description', 'twitter:description'], webpage),
'thumbnail': self._html_search_meta(['og:image', 'twitter:image:src'], webpage),
'duration': int_or_none(video_params.get('eventDuration')),
'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
'series': video_params.get('seriesTitle'),
'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
'episode_number': int_or_none(self._html_search_meta('episodeNumber', webpage, default=None)),
'episode': self._html_search_meta('episode_title', webpage, default=None),
'uploader_id': video_params.get('channel'),
'formats': formats,
'subtitles': subtitles,
}
|
gpl-3.0
| 337,979,553,094,636,350 | 3,072,445,922,648,363,500 | 39.855263 | 259 | 0.561192 | false |
halfcrazy/sqlalchemy
|
lib/sqlalchemy/testing/requirements.py
|
42
|
19949
|
# testing/requirements.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
from . import exclusions
from .. import util
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or
self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'returning'"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including Postgresql don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this
requirement as not present.
"""
return exclusions.skip_if(
lambda config: config.options.low_connections)
@property
def timing_intensive(self):
return exclusions.requires_tag("timing_intensive")
@property
def memory_intensive(self):
return exclusions.requires_tag("memory_intensive")
@property
def threading_with_mock(self):
"""Mark tests that use threading and mock at the same time - stability
issues have been observed with coverage + python 3.3
"""
return exclusions.skip_if(
lambda config: util.py3k and config.options.has_coverage,
"Stability issues with coverage + py3k"
)
@property
def no_coverage(self):
"""Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be
sensitive to coverage, such as Postgresql notice logging.
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Issues observed when coverage is enabled"
)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine('sqlite://')
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors
return True
except ImportError:
return False
|
mit
| -495,072,110,938,128,900 | -1,552,452,490,161,216,500 | 27.136812 | 86 | 0.636974 | false |
dragon788/wordfreq
|
tests/test.py
|
1
|
5277
|
from wordfreq import (
word_frequency, available_languages, cB_to_freq,
top_n_list, random_words, random_ascii_words, tokenize
)
from nose.tools import (
eq_, assert_almost_equal, assert_greater, raises
)
def test_freq_examples():
# Stopwords are most common in the correct language
assert_greater(word_frequency('the', 'en'),
word_frequency('de', 'en'))
assert_greater(word_frequency('de', 'es'),
word_frequency('the', 'es'))
def test_languages():
# Make sure the number of available languages doesn't decrease
avail = available_languages()
assert_greater(len(avail), 15)
# Laughter is the universal language
for lang in avail:
if lang not in {'zh', 'ja'}:
# we do not have enough Chinese data
# Japanese people do not lol
assert_greater(word_frequency('lol', lang), 0)
# Make up a weirdly verbose language code and make sure
# we still get it
new_lang_code = '%s-001-x-fake-extension' % lang.upper()
assert_greater(word_frequency('lol', new_lang_code), 0)
def test_twitter():
avail = available_languages('twitter')
assert_greater(len(avail), 14)
for lang in avail:
assert_greater(word_frequency('rt', lang, 'twitter'),
word_frequency('rt', lang, 'combined'))
def test_minimums():
eq_(word_frequency('esquivalience', 'en'), 0)
eq_(word_frequency('esquivalience', 'en', minimum=1e-6), 1e-6)
eq_(word_frequency('the', 'en', minimum=1), 1)
def test_most_common_words():
# If something causes the most common words in well-supported languages to
# change, we should know.
def get_most_common(lang):
"""
Return the single most common word in the language.
"""
return top_n_list(lang, 1)[0]
eq_(get_most_common('ar'), 'في')
eq_(get_most_common('de'), 'die')
eq_(get_most_common('en'), 'the')
eq_(get_most_common('es'), 'de')
eq_(get_most_common('fr'), 'de')
eq_(get_most_common('it'), 'di')
eq_(get_most_common('ja'), 'の')
eq_(get_most_common('nl'), 'de')
eq_(get_most_common('pt'), 'de')
eq_(get_most_common('ru'), 'в')
eq_(get_most_common('tr'), 'bir')
eq_(get_most_common('zh'), '的')
def test_language_matching():
freq = word_frequency('的', 'zh')
eq_(word_frequency('的', 'zh-TW'), freq)
eq_(word_frequency('的', 'zh-CN'), freq)
eq_(word_frequency('的', 'zh-Hant'), freq)
eq_(word_frequency('的', 'zh-Hans'), freq)
eq_(word_frequency('的', 'yue-HK'), freq)
eq_(word_frequency('的', 'cmn'), freq)
def test_cB_conversion():
eq_(cB_to_freq(0), 1.)
assert_almost_equal(cB_to_freq(-100), 0.1)
assert_almost_equal(cB_to_freq(-600), 1e-6)
@raises(ValueError)
def test_failed_cB_conversion():
cB_to_freq(1)
def test_tokenization():
# We preserve apostrophes within words, so "can't" is a single word in the
# data
eq_(tokenize("I don't split at apostrophes, you see.", 'en'),
['i', "don't", 'split', 'at', 'apostrophes', 'you', 'see'])
# Certain punctuation does not inherently split a word.
eq_(tokenize("Anything is possible at zombo.com", 'en'),
['anything', 'is', 'possible', 'at', 'zombo.com'])
# Splits occur after symbols, and at splitting punctuation such as hyphens.
eq_(tokenize('😂test', 'en'), ['😂', 'test'])
eq_(tokenize("flip-flop", 'en'), ['flip', 'flop'])
def test_casefolding():
eq_(tokenize('WEISS', 'de'), ['weiss'])
eq_(tokenize('weiß', 'de'), ['weiss'])
eq_(tokenize('İstanbul', 'tr'), ['istanbul'])
eq_(tokenize('SIKISINCA', 'tr'), ['sıkısınca'])
def test_phrase_freq():
ff = word_frequency("flip-flop", 'en')
assert_greater(ff, 0)
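# The phrase's words are combined so that reciprocal frequencies add, which is the identity checked below.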
assert_almost_equal(
1.0 / ff,
1.0 / word_frequency('flip', 'en') + 1.0 / word_frequency('flop', 'en')
)
def test_not_really_random():
# If your xkcd-style password comes out like this, maybe you shouldn't
# use it
eq_(random_words(nwords=4, lang='en', bits_per_word=0),
'the the the the')
# This not only tests random_ascii_words, it makes sure we didn't end
# up with 'eos' as a very common Japanese word
eq_(random_ascii_words(nwords=4, lang='ja', bits_per_word=0),
'rt rt rt rt')
@raises(ValueError)
def test_not_enough_ascii():
random_ascii_words(lang='zh')
def test_ar():
# Remove tatweels
eq_(
tokenize('متــــــــعب', 'ar'),
['متعب']
)
# Remove combining marks
eq_(
tokenize('حَرَكَات', 'ar'),
['حركات']
)
eq_(
tokenize('\ufefb', 'ar'), # An Arabic ligature...
['\u0644\u0627'] # ...that is affected by NFKC normalization
)
def test_ideographic_fallback():
# Try tokenizing Chinese text as English -- it should remain stuck together.
eq_(tokenize('中国文字', 'en'), ['中国文字'])
# When Japanese is tagged with the wrong language, it will be split
# at script boundaries.
ja_text = 'ひらがなカタカナromaji'
eq_(
tokenize(ja_text, 'en'),
['ひらがな', 'カタカナ', 'romaji']
)
|
mit
| -592,721,565,082,924,500 | 6,472,518,885,146,841,000 | 28.701149 | 80 | 0.590557 | false |
exelearning/iteexe
|
nevow/url.py
|
14
|
16868
|
# -*- test-case-name: "nevow.test.test_url" -*-
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
"""URL parsing, construction and rendering.
"""
from __future__ import generators
import weakref
from nevow import inevow
from nevow.stan import raw
from nevow.flat import flatten, serialize
from nevow.context import WovenContext
import urlparse
import urllib
from twisted.web.util import redirectTo
def _uqf(query):
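# Yield (key, value) pairs from a raw query string, unquoting both parts; a bare key yields (key, None).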
for x in query.split('&'):
if '=' in x:
yield tuple( [raw(urllib.unquote(s)) for s in x.split('=')] )
elif x:
yield (raw(urllib.unquote(x)), None)
unquerify = lambda query: list(_uqf(query))
class URL(object):
def __init__(self, scheme='http', netloc='localhost', pathsegs=None, querysegs=None, fragment=''):
self.scheme = scheme
self.netloc = netloc
if pathsegs is None:
pathsegs = ['']
self._qpathlist = pathsegs
if querysegs is None:
querysegs = []
self._querylist = querysegs
self.fragment = fragment
path = property(lambda self: '/'.join(self._qpathlist))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
for attr in ['scheme', 'netloc', '_qpathlist', '_querylist', 'fragment']:
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not self.__eq__(other)
query = property(
lambda self: [y is None and x or '='.join((x,y))
for (x,y) in self._querylist]
)
def _pathMod(self, newpathsegs, newqueryparts):
return self.__class__(self.scheme, self.netloc, newpathsegs, newqueryparts, self.fragment)
## class methods used to build URL objects ##
def fromString(klass, st):
scheme, netloc, path, query, fragment = urlparse.urlsplit(st)
u = klass(
scheme, netloc,
[raw(urllib.unquote(seg)) for seg in path.split('/')[1:]],
unquerify(query), fragment)
return u
fromString = classmethod(fromString)
def fromRequest(klass, request):
import warnings
warnings.warn(
"[v0.4] URL.fromRequest will change behaviour soon. Use fromContext instead",
DeprecationWarning,
stacklevel=2)
uri = request.prePathURL()
if '?' in request.uri:
uri += '?' + request.uri.split('?')[-1]
return klass.fromString(uri)
fromRequest = classmethod(fromRequest)
def fromContext(klass, context):
'''Create a URL object that represents the current URL in the traversal
process.'''
request = inevow.IRequest(context)
uri = request.prePathURL()
if '?' in request.uri:
uri += '?' + request.uri.split('?')[-1]
return klass.fromString(uri)
fromContext = classmethod(fromContext)
## path manipulations ##
def pathList(self, unquote=False, copy=True):
result = self._qpathlist
if unquote:
result = map(urllib.unquote, result)
if copy:
result = result[:]
return result
def sibling(self, path):
"""Construct a url where the given path segment is a sibling of this url
"""
l = self.pathList()
l[-1] = path
return self._pathMod(l, self.queryList(0))
def child(self, path):
"""Construct a url where the given path segment is a child of this url
"""
l = self.pathList()
if l[-1] == '':
l[-1] = path
else:
l.append(path)
return self._pathMod(l, self.queryList(0))
def isRoot(self, pathlist):
return (pathlist == [''] or not pathlist)
def parent(self):
import warnings
warnings.warn(
"[v0.4] URL.parent has been deprecated and replaced with parentdir (which does what parent used to do) and up (which does what you probably thought parent would do ;-))",
DeprecationWarning,
stacklevel=2)
return self.parentdir()
def here(self):
import warnings
warnings.warn(
"URL.here() is deprecated, please use URL.curdir() instead!",
DeprecationWarning,
stacklevel=2)
return self.curdir()
def curdir(self):
"""Construct a url which is a logical equivalent to '.'
of the current url. For example:
>>> print URL.fromString('http://foo.com/bar').curdir()
http://foo.com/
>>> print URL.fromString('http://foo.com/bar/').curdir()
http://foo.com/bar/
"""
l = self.pathList()
if l[-1] != '':
l[-1] = ''
return self._pathMod(l, self.queryList(0))
def up(self):
"""Pop a URL segment from this url.
"""
l = self.pathList()
if len(l):
l.pop()
return self._pathMod(l, self.queryList(0))
def parentdir(self):
"""Construct a url which is the parent of this url's directory;
This is logically equivalent to '..' of the current url.
For example:
>>> print URL.fromString('http://foo.com/bar/file').parentdir()
http://foo.com/
>>> print URL.fromString('http://foo.com/bar/dir/').parentdir()
http://foo.com/bar/
"""
l = self.pathList()
if not self.isRoot(l) and l[-1] == '':
del l[-2]
else:
# we are a file, such as http://example.com/foo/bar our
# parent directory is http://example.com/
l.pop()
if self.isRoot(l): l.append('')
else: l[-1] = ''
return self._pathMod(l, self.queryList(0))
def click(self, href):
"""Build a path by merging 'href' and this path.
Return a path which is the URL where a browser would presumably
take you if you clicked on a link with an 'href' as given.
"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(href)
if (scheme, netloc, path, query, fragment) == ('', '', '', '', ''):
return self
query = unquerify(query)
if scheme:
if path and path[0] == '/':
path = path[1:]
return URL(scheme, netloc, map(raw, path.split('/')), query, fragment)
else:
scheme = self.scheme
if not netloc:
netloc = self.netloc
if not path:
path = self.path
if not query:
query = self._querylist
if not fragment:
fragment = self.fragment
else:
if path[0] == '/':
path = path[1:]
else:
l = self.pathList()
l[-1] = path
path = '/'.join(l)
path = normURLPath(path)
return URL(scheme, netloc, map(raw, path.split('/')), query, fragment)
## query manipulation ##
def queryList(self, copy=True):
"""Return current query as a list of tuples."""
if copy:
return self._querylist[:]
return self._querylist
# FIXME: here we call str() on query arg values: is this right?
def add(self, name, value=None):
"""Add a query argument with the given value
None indicates that the argument has no value
"""
q = self.queryList()
q.append((name, value))
return self._pathMod(self.pathList(copy=False), q)
def replace(self, name, value=None):
"""Remove all existing occurrances of the query
argument 'name', *if it exists*, then add the argument
with the given value.
None indicates that the argument has no value
"""
ql = self.queryList(False)
## Preserve the original position of the query key in the list
i = 0
for (k, v) in ql:
if k == name:
break
i += 1
q = filter(lambda x: x[0] != name, ql)
q.insert(i, (name, value))
return self._pathMod(self.pathList(copy=False), q)
def remove(self, name):
"""Remove all query arguments with the given name
"""
return self._pathMod(
self.pathList(copy=False),
filter(
lambda x: x[0] != name, self.queryList(False)))
def clear(self, name=None):
"""Remove all existing query arguments
"""
if name is None:
q = []
else:
q = filter(lambda x: x[0] != name, self.queryList(False))
return self._pathMod(self.pathList(copy=False), q)
## scheme manipulation ##
def secure(self, secure=True, port=None):
"""Modify the scheme to https/http and return the new URL.
@param secure: choose between https and http, default to True (https)
@param port: port, override the scheme's normal port
"""
# Choose the scheme and default port.
if secure:
scheme, defaultPort = 'https', 443
else:
scheme, defaultPort = 'http', 80
# Rebuild the netloc with port if not default.
netloc = self.netloc.split(':',1)[0]
if port is not None and port != defaultPort:
netloc = '%s:%d' % (netloc, port)
return self.__class__(scheme, netloc, self._qpathlist, self._querylist, self.fragment)
## fragment/anchor manipulation
def anchor(self, anchor=None):
'''Modify the fragment/anchor and return a new URL. An anchor of
None (the default) or '' (the empty string) will the current anchor.
'''
return self.__class__(self.scheme, self.netloc, self._qpathlist, self._querylist, anchor)
## object protocol override ##
def __str__(self):
return flatten(self)
def __repr__(self):
return (
'URL(scheme=%r, netloc=%r, pathsegs=%r, querysegs=%r, fragment=%r)'
% (self.scheme, self.netloc, self._qpathlist, self._querylist, self.fragment))
def normURLPath(path):
'''Normalise the URL path by resolving segments of '.' and '..'.
'''
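# Walk the segments: '.' is dropped, '..' pops the previous segment, everything else is kept.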
segs = []
addEmpty = False
pathSegs = path.split('/')
for seg in pathSegs:
if seg == '.':
pass
elif seg == '..':
if segs:
segs.pop()
else:
segs.append(seg)
if pathSegs[-1:] in (['.'],['..']):
segs.append('')
return '/'.join(segs)
class URLOverlay(object):
def __init__(self, urlaccessor, doc=None, dolater=None, keep=None):
"""A Proto like object for abstractly specifying urls in stan trees.
@param urlaccessor: a function which takes context and returns a URL
@param doc: a string documenting this URLOverlay instance's usage
@param dolater: a list of tuples of (command, args, kw) where
command is a string, args is a tuple and kw is a dict; when the
URL is returned from urlaccessor during rendering, these
methods will be applied to the URL in order
"""
if doc is not None:
self.__doc__ = doc
self.urlaccessor = urlaccessor
if dolater is None:
dolater= []
self.dolater = dolater
if keep is None:
keep = []
self._keep = keep
def addCommand(self, cmd, args, kw):
dl = self.dolater[:]
dl.append((cmd, args, kw))
return self.__class__(self.urlaccessor, dolater=dl, keep=self._keep[:])
def keep(self, *args):
"""A list of arguments to carry over from the previous url.
"""
K = self._keep[:]
K.extend(args)
return self.__class__(self.urlaccessor, dolater=self.dolater[:], keep=K)
def createForwarder(cmd):
return lambda self, *args, **kw: self.addCommand(cmd, args, kw)
for cmd in [
'sibling', 'child', 'parent', 'here', 'curdir', 'click', 'add',
'replace', 'clear', 'remove', 'secure', 'anchor', 'up', 'parentdir'
]:
setattr(URLOverlay, cmd, createForwarder(cmd))
def hereaccessor(context):
return URL.fromContext(context).clear()
here = URLOverlay(
hereaccessor,
"A lazy url construction object representing the current page's URL. "
"The URL which will be used will be determined at render time by "
"looking at the request. Any query parameters will be "
"cleared automatically.")
def gethereaccessor(context):
return URL.fromContext(context)
gethere = URLOverlay(gethereaccessor,
"A lazy url construction object like 'here' except query parameters "
"are preserved. Useful for constructing a URL to this same object "
"when query parameters need to be preserved but modified slightly.")
def viewhereaccessor(context):
U = hereaccessor(context)
i = 1
while True:
try:
params = context.locate(inevow.IViewParameters, depth=i)
except KeyError:
break
for (cmd, args, kw) in iter(params):
U = getattr(U, cmd)(*args, **kw)
i += 1
return U
viewhere = URLOverlay(viewhereaccessor,
"A lazy url construction object like 'here' IViewParameters objects "
"are looked up in the context during rendering. Commands provided by "
"any found IViewParameters objects are applied to the URL object before "
"rendering it.")
def rootaccessor(context):
req = context.locate(inevow.IRequest)
root = req.getRootURL()
if root is None:
return URL.fromContext(context).click('/')
return URL.fromString(root)
root = URLOverlay(rootaccessor,
"A lazy URL construction object representing the root of the "
"application. Normally, this will just be the logical '/', but if "
"request.rememberRootURL() has previously been used in "
"the request traversal process, the url of the resource "
"where rememberRootURL was called will be used instead.")
def URLSerializer(original, context):
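# Flatten a URL object to text: scheme://netloc, '/'-prefixed path segments, a '?key=value&...' query, and an optional '#fragment'.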
urlContext = WovenContext(parent=context, precompile=context.precompile, inURL=True)
if original.scheme:
yield "%s://%s" % (original.scheme, original.netloc)
for pathsegment in original._qpathlist:
yield '/'
yield serialize(pathsegment, urlContext)
query = original._querylist
if query:
yield '?'
first = True
for key, value in query:
if not first:
yield '&'
else:
first = False
yield serialize(key, urlContext)
if value is not None:
yield '='
yield serialize(value, urlContext)
if original.fragment:
yield "#"
yield serialize(original.fragment, urlContext)
def URLOverlaySerializer(original, context):
if context.precompile:
yield original
else:
url = original.urlaccessor(context)
for (cmd, args, kw) in original.dolater:
url = getattr(url, cmd)(*args, **kw)
req = context.locate(inevow.IRequest)
for key in original._keep:
for value in req.args.get(key, []):
url = url.add(key, value)
yield serialize(url, context)
## This is totally unfinished and doesn't work yet.
#class IURLGenerator(compy.Interface):
# pass
class URLGenerator:
#__implements__ = IURLGenerator,
def __init__(self):
self._objmap = weakref.WeakKeyDictionary()
def objectMountedAt(self, obj, at):
self._objmap[obj] = at
def url(self, obj):
try:
return self._objmap.get(obj, None)
except TypeError:
return None
__call__ = url
def __getstate__(self):
d = self.__dict__.copy()
del d['_objmap']
return d
def __setstate__(self, state):
self.__dict__ = state
self._objmap = weakref.WeakKeyDictionary()
class URLRedirectAdapter:
"""Adapt URL objects so that trying to render one causes a HTTP
redirect.
"""
__implements__ = inevow.IResource,
def __init__(self, original):
self.original = original
def locateChild(self, ctx, segments):
return self, ()
def renderHTTP(self, ctx):
# The URL may have deferred parts so flatten it
u = flatten(self.original, ctx)
# It might also be relative so resolve it against the current URL
# and flatten it again.
u = flatten(URL.fromContext(ctx).click(u), ctx)
return redirectTo(u, inevow.IRequest(ctx))
|
gpl-2.0
| 2,206,488,086,269,287,000 | 8,798,825,523,913,253,000 | 30.766478 | 182 | 0.571378 | false |
ddd332/presto
|
presto-docs/target/sphinx/docutils/writers/xetex/__init__.py
|
4
|
5079
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# :Author: Günter Milde <[email protected]>
# :Revision: $Revision: 7389 $
# :Date: $Date: 2012-03-30 13:58:21 +0200 (Fre, 30 Mär 2012) $
# :Copyright: © 2010 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""
XeLaTeX document tree Writer.
A variant of Docutils' standard 'latex2e' writer producing output
suited for processing with XeLaTeX (http://tug.org/xetex/).
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import re
import docutils
from docutils import frontend, nodes, utils, writers, languages
from docutils.writers import latex2e
class Writer(latex2e.Writer):
"""A writer for Unicode-based LaTeX variants (XeTeX, LuaTeX)"""
supported = ('xetex','xelatex','luatex')
"""Formats this writer supports."""
default_template = 'xelatex.tex'
default_preamble = '\n'.join([
r'% Linux Libertine (free, wide coverage, not only for Linux)',
r'\setmainfont{Linux Libertine O}',
r'\setsansfont{Linux Biolinum O}',
r'\setmonofont[HyphenChar=None]{DejaVu Sans Mono}',
])
config_section = 'xetex writer'
config_section_dependencies = ('writers', 'latex2e writer')
settings_spec = frontend.filter_settings_spec(
latex2e.Writer.settings_spec,
'font_encoding',
template=('Template file. Default: "%s".' % default_template,
['--template'], {'default': default_template, 'metavar': '<file>'}),
latex_preamble=('Customization by LaTeX code in the preamble. '
'Default: select "Linux Libertine" fonts (matching default_preamble above).',
['--latex-preamble'],
{'default': default_preamble}),
)
def __init__(self):
latex2e.Writer.__init__(self)
self.settings_defaults.update({'fontencoding': ''}) # use default (EU1 or EU2)
self.translator_class = XeLaTeXTranslator
class Babel(latex2e.Babel):
"""Language specifics for XeTeX.
Use `polyglossia` instead of `babel` and adapt settings.
"""
language_codes = latex2e.Babel.language_codes.copy()
# Additionally supported or differently named languages:
language_codes.update({
# code Polyglossia-name comment
'cop': 'coptic',
'de': 'german', # new spelling (de_1996)
'de_1901': 'ogerman', # old spelling
'dv': 'divehi', # Maldivian
'dsb': 'lsorbian',
'el_polyton': 'polygreek',
'fa': 'farsi',
'grc': 'ancientgreek',
'hsb': 'usorbian',
'sh-cyrl': 'serbian', # Serbo-Croatian, Cyrillic script
'sh-latn': 'croatian', # Serbo-Croatian, Latin script
'sq': 'albanian',
'sr': 'serbian', # Cyrillic script (sr-cyrl)
'th': 'thai',
'vi': 'vietnamese',
# zh-latn: ??? # Chinese Pinyin
})
# Languages without Polyglossia support:
for key in ('af', # 'afrikaans',
'de_at', # 'naustrian',
'de_at_1901', # 'austrian',
'fr_ca', # 'canadien',
'grc_ibycus', # 'ibycus', (Greek Ibycus encoding)
'sr-latn', # 'serbian script=latin'
):
del(language_codes[key])
def __init__(self, language_code, reporter):
self.language_code = language_code
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
self.warn_msg = 'Language "%s" not supported by XeTeX (polyglossia).'
self.quote_index = 0
self.quotes = ('"', '"')
# language dependent configuration:
# double quotes are "active" in some languages (e.g. German).
self.literal_double_quote = u'"' # TODO: use \textquotedbl
def __call__(self):
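# Emit the polyglossia preamble: load the package, set the main language, and declare any other languages used.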
setup = [r'\usepackage{polyglossia}',
r'\setdefaultlanguage{%s}' % self.language]
if self.otherlanguages:
setup.append(r'\setotherlanguages{%s}' %
','.join(self.otherlanguages.keys()))
return '\n'.join(setup)
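# Illustrative note (not part of the upstream source): for a document whose main
# language is 'de' and which embeds an 'en' quotation, Babel.__call__() above
# would return roughly:
#   \usepackage{polyglossia}
#   \setdefaultlanguage{german}
#   \setotherlanguages{english}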
class XeLaTeXTranslator(latex2e.LaTeXTranslator):
def __init__(self, document):
self.is_xetex = True # typeset with XeTeX or LuaTeX engine
latex2e.LaTeXTranslator.__init__(self, document, Babel)
if self.latex_encoding == 'utf8':
self.requirements.pop('_inputenc', None)
else:
self.requirements['_inputenc'] = (r'\XeTeXinputencoding %s '
% self.latex_encoding)
|
apache-2.0
| -8,795,137,584,706,536,000 | 2,953,931,823,806,864,400 | 36.873134 | 86 | 0.582069 | false |
wasade/qiime
|
tests/test_plot_semivariogram.py
|
1
|
12517
|
#!/usr/bin/env python
__author__ = "Antonio Gonzalez Pena"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Antonio Gonzalez Pena"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "[email protected]"
from qiime.plot_semivariogram import hist_bins, fit_semivariogram
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from numpy import asarray
class FunctionTests(TestCase):
"""Tests of top-level functions"""
def test_hist_bins(self):
""" test hist_bins """
x = asarray(
[3.,
4.12310563,
4.24264069,
4.47213595,
5.,
5.,
5.,
5.,
5.38516481,
5.65685425,
6.40312424,
6.40312424,
6.70820393,
7.,
7.07106781,
7.07106781,
7.28010989,
7.81024968,
8.,
8.06225775,
8.06225775,
8.24621125,
9.,
9.48683298,
9.48683298,
9.89949494,
9.89949494,
10.,
10.04987562,
10.04987562])
bins = [2.0, 5.0, 7.5, 10.0, 11.0]
hist_res = [0., 8., 9., 11., 2.]
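        # For reference: each entry of hist_res counts the x values up to and
        # including the matching bin edge (and above the previous one), so
        # 0 + 8 + 9 + 11 + 2 accounts for all 30 distances above.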
vals, hist = hist_bins(bins, x)
assert_almost_equal(vals, bins)
assert_almost_equal(hist, hist_res)
def test_reorder_samples(self):
""" test that regural and irregular order give the same results """
model = "linear"
# Test normal order
x_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
x = asarray(
[[0.0,
1.0,
2.0,
3.0,
4.0,
5.0],
[0.0,
0.0,
6.0,
7.0,
8.0,
9.0],
[0.0,
0.0,
0.0,
10.0,
11.0,
12.0],
[0.0,
0.0,
0.0,
0.0,
13.0,
14.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
15.0]])
y_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
y = asarray(
[[0.0,
1.0,
2.0,
3.0,
4.0,
5.0],
[0.0,
0.0,
6.0,
7.0,
8.0,
9.0],
[0.0,
0.0,
0.0,
10.0,
11.0,
12.0],
[0.0,
0.0,
0.0,
0.0,
13.0,
14.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
15.0]])
vals_exp = [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0]
x_vals, y_vals, x_fit, y_fit, func_text = fit_semivariogram(
(x_lbl, x), (x_lbl, x), model, [])
assert_almost_equal(x_vals, vals_exp)
assert_almost_equal(y_vals, vals_exp)
assert_almost_equal(x_fit, vals_exp)
assert_almost_equal(y_fit, vals_exp)
# Test altered
model = "linear"
# order = [5, 1, 3, 4, 0, 2]
x_lbl = ['s6', 's2', 's4', 's5', 's1', 's3']
x = asarray(
[[0.0,
0.0,
0.0,
0.0,
0.0,
0.0],
[9.0,
0.0,
7.0,
8.0,
0.0,
6.0],
[14.0,
0.0,
0.0,
13.0,
0.0,
0.0],
[15.0,
0.0,
0.0,
0.0,
0.0,
0.0],
[5.0,
1.0,
3.0,
4.0,
0.0,
2.0],
[12.0,
0.0,
10.0,
11.0,
0.0,
0.0]])
y_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
y = asarray(
[[0.0,
1.0,
2.0,
3.0,
4.0,
5.0],
[0.0,
0.0,
6.0,
7.0,
8.0,
9.0],
[0.0,
0.0,
0.0,
10.0,
11.0,
12.0],
[0.0,
0.0,
0.0,
0.0,
13.0,
14.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
15.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
0.0]])
vals_exp = [
1.,
2.,
3.,
4.,
5.,
6.,
7.,
8.,
9.,
10.,
11.,
12.,
13.,
14.,
15.]
x_vals, y_vals, x_fit, y_fit, func_text = fit_semivariogram(
(x_lbl, x), (y_lbl, y), model, [])
assert_almost_equal(x_vals, vals_exp)
assert_almost_equal(y_vals, vals_exp)
assert_almost_equal(x_fit, vals_exp)
assert_almost_equal(y_fit, vals_exp)
def test_models_semivariograms(self):
""" test the semivariogram fitting models """
# All models should return the same x_vals, y_vals, x_fit
# because we are using the same x
x_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
x = asarray(
[[0.0,
1.0,
2.0,
3.0,
4.0,
5.0],
[0.0,
0.0,
6.0,
7.0,
8.0,
9.0],
[0.0,
0.0,
0.0,
10.0,
11.0,
12.0],
[0.0,
0.0,
0.0,
0.0,
13.0,
14.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
15.0]])
vals_exp = [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0]
model = "nugget"
y_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
y = asarray(
[[0.0,
5.0,
5.0,
5.0,
5.0,
5.0],
[0.0,
0.0,
5.0,
5.0,
5.0,
5.0],
[0.0,
0.0,
0.0,
5.0,
5.0,
5.0],
[0.0,
0.0,
0.0,
0.0,
5.0,
5.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
5.0]])
y_vals_exp = [2.3000000143667378] * (len(x) * 2)
x_vals, y_vals, x_fit, y_fit, func_text = fit_semivariogram(
(x_lbl, x), (x_lbl, x), model, [])
assert_almost_equal(x_vals, vals_exp)
assert_almost_equal(y_vals, vals_exp)
assert_almost_equal(x_fit, vals_exp)
assert_almost_equal(y_fit, y_vals_exp)
model = "exponential"
y_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
y = asarray(
[[0.0,
1.0,
22.0,
33.0,
44.0,
55.0],
[0.0,
0.0,
66.0,
77.0,
88.0,
99.0],
[0.0,
0.0,
0.0,
1010.0,
1111.0,
1212.0],
[0.0,
0.0,
0.0,
0.0,
1313.0,
1414.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
1515.0]])
x_vals_exp = [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0]
y_vals_exp = [0.0, 0.0, 0.0, 0.0, 1.0, 22.0, 33.0, 44.0, 66.0, 77.0]
x_fit_exp = [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0]
y_fit_exp = [-1.481486808707005, -1.481486808707005, -1.481486808707005,
-1.481486808707005, 9.72783464904061, 20.937152199747878,
32.14646584698613, 43.355775583612704, 65.7743833464588,
76.983681369107]
x_vals, y_vals, x_fit, y_fit, func_text = fit_semivariogram(
(x_lbl, x), (y_lbl, y), model, [])
assert_almost_equal(x_vals, x_vals_exp)
assert_almost_equal(y_vals, y_vals_exp)
assert_almost_equal(x_fit, x_fit_exp)
assert_almost_equal(y_fit, y_fit_exp, decimal=2)
model = "gaussian"
y_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
y = asarray(
[[0.0,
1.0,
22.0,
33.0,
44.0,
55.0],
[0.0,
0.0,
66.0,
77.0,
88.0,
99.0],
[0.0,
0.0,
0.0,
1010.0,
1111.0,
1212.0],
[0.0,
0.0,
0.0,
0.0,
1313.0,
1414.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
1515.0]])
        y_vals_exp = [0.17373844, 0.17373844, 0.17373844, 0.17373844,
                      0.54915099, 1.5597716, 2.91606171, 4.2880578,
                      6.24509872, 6.74690541]
x_vals, y_vals, x_fit, y_fit, func_text = fit_semivariogram(
(x_lbl, x), (x_lbl, x), model, [])
assert_almost_equal(x_vals, vals_exp)
assert_almost_equal(y_vals, vals_exp)
assert_almost_equal(x_fit, vals_exp)
assert_almost_equal(y_fit, y_vals_exp, decimal=2)
model = "periodic"
y_lbl = ['s1', 's2', 's3', 's4', 's5', 's6']
y = asarray(
[[0.0,
1.0,
22.0,
33.0,
44.0,
55.0],
[0.0,
0.0,
66.0,
77.0,
88.0,
99.0],
[0.0,
0.0,
0.0,
1010.0,
1111.0,
1212.0],
[0.0,
0.0,
0.0,
0.0,
1313.0,
1414.0],
[0.0,
0.0,
0.0,
0.0,
0.0,
1515.0]])
y_vals_exp = [0.2324873886681871, 0.2324873886681871,
0.2324873886681871, 0.2324873886681871,
0.5528698895985695, 1.4508010363573784,
2.7491053124879112, 4.191607473962063,
6.39840364731269, 6.727263101495738]
x_vals, y_vals, x_fit, y_fit, func_text = fit_semivariogram(
(x_lbl, x), (x_lbl, x), model, [])
assert_almost_equal(x_vals, vals_exp)
assert_almost_equal(y_vals, vals_exp)
assert_almost_equal(x_fit, vals_exp)
assert_almost_equal(y_fit, y_vals_exp, decimal=2)
model = "linear"
y_lbl = x_lbl
y = x
x_vals, y_vals, x_fit, y_fit, func_text = fit_semivariogram(
(x_lbl, x), (x_lbl, x), model, [])
assert_almost_equal(x_vals, vals_exp)
assert_almost_equal(y_vals, vals_exp)
assert_almost_equal(x_fit, vals_exp)
assert_almost_equal(y_fit, vals_exp, decimal=2)
# run tests if called from command line
if __name__ == '__main__':
main()
|
gpl-2.0
| -9,173,341,239,767,405,000 | 3,018,276,786,150,458,000 | 25.688699 | 80 | 0.308141 | false |
auready/docker-py
|
docker/utils/socket.py
|
10
|
1771
|
import errno
import os
import select
import struct
import six
try:
from ..transport import NpipeSocket
except ImportError:
NpipeSocket = type(None)
class SocketError(Exception):
pass
def read(socket, n=4096):
"""
Reads at most n bytes from socket
"""
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
# wait for data to become available
if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
return os.read(socket.fileno(), n)
except EnvironmentError as e:
if e.errno not in recoverable_errors:
raise
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = six.binary_type()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
raise SocketError("Unexpected EOF")
data += next_data
return data
def next_frame_size(socket):
"""
Returns the size of the next frame of data waiting to be read from socket,
according to the protocol defined here:
https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
return 0
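    # The 8-byte header is one stream-type byte (0=stdin, 1=stdout, 2=stderr),
    # three padding bytes, then a big-endian uint32 payload length -- hence the
    # '>BxxxL' format below, of which only the length is used here.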
_, actual = struct.unpack('>BxxxL', data)
return actual
def frames_iter(socket):
"""
Returns a generator of frames read from socket
"""
while True:
n = next_frame_size(socket)
if n == 0:
break
while n > 0:
result = read(socket, n)
n -= len(result)
yield result
|
apache-2.0
| -1,708,133,124,733,011,700 | 7,041,475,678,035,744,000 | 21.417722 | 96 | 0.607002 | false |
SuperKogito/Cryptos
|
CryptosCode/ExitPage.py
|
1
|
2699
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 23 19:05:35 2017
@author: SuperKogito
"""
# Define imports
import tkinter as tk
class ExitPage(tk.Frame):
""" Exit page class """
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
self.configure(background='black')
# Define main frame
self.main_frame = tk.Frame(self, background='black')
self.main_frame.pack(expand=1)
self.main_frame.pack()
# Define upper frame
upper_frame = tk.Frame(self.main_frame, width=300, height=50,
background='black')
upper_frame.grid(column=0, row=0)
# Define label
        exit_string = '\n\nAre you sure that you want to exit Cryptos?\n'
exit_label = tk.Label(upper_frame, text=exit_string,
background='black', foreground="white")
exit_label.pack(side="top", fill="x", pady=10)
# Define middle frame
middle_frame = tk.Frame(self.main_frame, background='black',
width=300, height=50)
middle_frame.grid(column=0, row=1)
# Define cancel button
cancel_button = tk.Button(middle_frame, text="Cancel",
command=lambda: controller.show_frame("PageOne"))
cancel_button.pack(side=tk.RIGHT)
# Define yes button
yes_button = tk.Button(middle_frame, text="Yes",
command=lambda: controller.quit_func())
yes_button.pack(side=tk.RIGHT, padx=5, pady=5)
# Configure the buttons
cancel_button.configure(background='black', foreground='white',
activebackground='#0080ff',
activeforeground='white')
yes_button.configure(background='black', foreground='white',
activebackground='#0080ff',
activeforeground='white')
# Define lower frame
lower_frame = tk.Frame(self.main_frame, background='black',
width=300, height=50)
lower_frame.grid(column=0, row=2)
# Define label
dev_text = (
"\nDeveloped by: SuperKogito\n"
"Gihthub repository: "
"https://github.com/SuperKogito/Cryptos"
)
self.developper_text = tk.Label(lower_frame,
text=dev_text,
background='black',
foreground='White')
self.developper_text.pack(side="bottom")
|
mit
| -7,603,334,696,116,407,000 | 3,915,652,186,876,183,600 | 42.532258 | 83 | 0.527603 | false |
scorphus/thefuck
|
thefuck/specific/git.py
|
4
|
1128
|
import re
from decorator import decorator
from ..utils import is_app
from ..shells import shell
@decorator
def git_support(fn, command):
"""Resolves git aliases and supports testing for both git and hub."""
# supports GitHub's `hub` command
# which is recommended to be used with `alias git=hub`
# but at this point, shell aliases have already been resolved
if not is_app(command, 'git', 'hub'):
return False
# perform git aliases expansion
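    # e.g. with git tracing enabled, a failed `git co -b feature` may print a
    # line like "trace: alias expansion: co => 'checkout'"; the regex below
    # captures the alias and its (quoted) expansion so the suggested fix is
    # applied to the expanded command rather than the alias.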
if 'trace: alias expansion:' in command.output:
search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
command.output)
alias = search.group(1)
# by default git quotes everything, for example:
# 'commit' '--amend'
# which is surprising and does not allow to easily test for
# eg. 'git commit'
expansion = ' '.join(shell.quote(part)
for part in shell.split_command(search.group(2)))
new_script = command.script.replace(alias, expansion)
command = command.update(script=new_script)
return fn(command)
|
mit
| 1,809,268,168,286,498,800 | -8,164,625,128,224,067,000 | 34.25 | 78 | 0.620567 | false |
astrofrog/numpy
|
numpy/matlib.py
|
90
|
9494
|
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
from numpy import *
__version__ = np.__version__
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
"""
Return a new matrix of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty matrix.
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major) or
Fortran (column-major) order in memory.
See Also
--------
empty_like, zeros
Notes
-----
`empty`, unlike `zeros`, does not set the matrix values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.empty((2, 2)) # filled with random data
matrix([[ 6.76425276e-320, 9.79033856e-307],
[ 7.39337286e-309, 3.22135945e-309]]) #random
>>> np.matlib.empty((2, 2), dtype=int)
matrix([[ 6600475, 0],
[ 6586976, 22740995]]) #random
"""
return ndarray.__new__(matrix, shape, dtype, order=order)
def ones(shape, dtype=None, order='C'):
"""
Matrix of ones.
Return a matrix of given shape and type, filled with ones.
Parameters
----------
shape : {sequence of ints, int}
Shape of the matrix
dtype : data-type, optional
The desired data-type for the matrix, default is np.float64.
order : {'C', 'F'}, optional
Whether to store matrix in C- or Fortran-contiguous order,
default is 'C'.
Returns
-------
out : matrix
Matrix of ones of given shape, dtype, and order.
See Also
--------
ones : Array of ones.
matlib.zeros : Zero matrix.
Notes
-----
If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
`out` becomes a single row matrix of shape ``(1,N)``.
Examples
--------
>>> np.matlib.ones((2,3))
matrix([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> np.matlib.ones(2)
matrix([[ 1., 1.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
a.fill(1)
return a
def zeros(shape, dtype=None, order='C'):
"""
Return a matrix of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the matrix
dtype : data-type, optional
The desired data-type for the matrix, default is float.
order : {'C', 'F'}, optional
Whether to store the result in C- or Fortran-contiguous order,
default is 'C'.
Returns
-------
out : matrix
Zero matrix of given shape, dtype, and order.
See Also
--------
numpy.zeros : Equivalent array function.
matlib.ones : Return a matrix of ones.
Notes
-----
If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
`out` becomes a single row matrix of shape ``(1,N)``.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.zeros((2, 3))
matrix([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> np.matlib.zeros(2)
matrix([[ 0., 0.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
a.fill(0)
return a
def identity(n,dtype=None):
"""
Returns the square identity matrix of given size.
Parameters
----------
n : int
Size of the returned identity matrix.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : matrix
`n` x `n` matrix with its main diagonal set to one,
and all other elements zero.
See Also
--------
numpy.identity : Equivalent array function.
matlib.eye : More general matrix identity function.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.identity(3, dtype=int)
matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
a = array([1]+n*[0],dtype=dtype)
b = empty((n,n),dtype=dtype)
b.flat = a
return b
def eye(n,M=None, k=0, dtype=float):
"""
Return a matrix with ones on the diagonal and zeros elsewhere.
Parameters
----------
n : int
Number of rows in the output.
M : int, optional
Number of columns in the output, defaults to `n`.
k : int, optional
Index of the diagonal: 0 refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : dtype, optional
Data-type of the returned matrix.
Returns
-------
I : matrix
A `n` x `M` matrix where all elements are equal to zero,
except for the `k`-th diagonal, whose values are equal to one.
See Also
--------
numpy.eye : Equivalent array function.
identity : Square identity matrix.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.eye(3, k=1, dtype=float)
matrix([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
return asmatrix(np.eye(n,M,k,dtype))
def rand(*args):
"""
Return a matrix of random values with given shape.
    Create a matrix of the given shape and populate it with
random samples from a uniform distribution over ``[0, 1)``.
Parameters
----------
\\*args : Arguments
Shape of the output.
If given as N integers, each integer specifies the size of one
dimension.
If given as a tuple, this tuple gives the complete shape.
Returns
-------
out : ndarray
The matrix of random values with shape given by `\\*args`.
See Also
--------
randn, numpy.random.rand
Examples
--------
>>> import numpy.matlib
>>> np.matlib.rand(2, 3)
matrix([[ 0.68340382, 0.67926887, 0.83271405],
[ 0.00793551, 0.20468222, 0.95253525]]) #random
>>> np.matlib.rand((2, 3))
matrix([[ 0.84682055, 0.73626594, 0.11308016],
[ 0.85429008, 0.3294825 , 0.89139555]]) #random
If the first argument is a tuple, other arguments are ignored:
>>> np.matlib.rand((2, 3), 4)
matrix([[ 0.46898646, 0.15163588, 0.95188261],
[ 0.59208621, 0.09561818, 0.00583606]]) #random
"""
if isinstance(args[0], tuple):
args = args[0]
return asmatrix(np.random.rand(*args))
def randn(*args):
"""
Return a random matrix with data from the "standard normal" distribution.
`randn` generates a matrix filled with random floats sampled from a
univariate "normal" (Gaussian) distribution of mean 0 and variance 1.
Parameters
----------
\\*args : Arguments
Shape of the output.
If given as N integers, each integer specifies the size of one
dimension. If given as a tuple, this tuple gives the complete shape.
Returns
-------
Z : matrix of floats
A matrix of floating-point samples drawn from the standard normal
distribution.
See Also
--------
rand, random.randn
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use:
``sigma * np.matlib.randn(...) + mu``
Examples
--------
>>> import numpy.matlib
>>> np.matlib.randn(1)
matrix([[-0.09542833]]) #random
>>> np.matlib.randn(1, 2, 3)
matrix([[ 0.16198284, 0.0194571 , 0.18312985],
[-0.7509172 , 1.61055 , 0.45298599]]) #random
Two-by-four matrix of samples from :math:`N(3, 6.25)`:
>>> 2.5 * np.matlib.randn((2, 4)) + 3
matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922],
[ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random
"""
if isinstance(args[0], tuple):
args = args[0]
return asmatrix(np.random.randn(*args))
def repmat(a, m, n):
"""
Repeat a 0-D to 2-D array or matrix MxN times.
Parameters
----------
a : array_like
The array or matrix to be repeated.
m, n : int
The number of times `a` is repeated along the first and second axes.
Returns
-------
out : ndarray
The result of repeating `a`.
Examples
--------
>>> import numpy.matlib
>>> a0 = np.array(1)
>>> np.matlib.repmat(a0, 2, 3)
array([[1, 1, 1],
[1, 1, 1]])
>>> a1 = np.arange(4)
>>> np.matlib.repmat(a1, 2, 2)
array([[0, 1, 2, 3, 0, 1, 2, 3],
[0, 1, 2, 3, 0, 1, 2, 3]])
>>> a2 = np.asmatrix(np.arange(6).reshape(2, 3))
>>> np.matlib.repmat(a2, 2, 3)
matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5]])
"""
a = asanyarray(a)
ndim = a.ndim
if ndim == 0:
origrows, origcols = (1,1)
elif ndim == 1:
origrows, origcols = (1, a.shape[0])
else:
origrows, origcols = a.shape
rows = origrows * m
cols = origcols * n
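    # The expression below tiles vertically first (stack the flattened array m
    # times and reshape back to origcols columns), then horizontally (repeat
    # each row n times and fold the consecutive copies into the columns on the
    # final reshape).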
c = a.reshape(1,a.size).repeat(m, 0).reshape(rows, origcols).repeat(n,0)
return c.reshape(rows, cols)
|
bsd-3-clause
| -1,674,400,489,547,274,800 | -4,640,046,482,000,400,000 | 25.668539 | 78 | 0.548136 | false |
nicholaschris/landsatpy
|
utils.py
|
1
|
2693
|
import operator
import pandas as pd
import numpy as np
from numpy import ma
from scipy.misc import imresize
import scipy.ndimage as ndimage
from skimage.morphology import disk, dilation
def get_truth(input_one, input_two, comparison): # too much abstraction
ops = {'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq}
return ops[comparison](input_one, input_two)
def convert_to_celsius(brightness_temp_input):
return brightness_temp_input - 272.15
def calculate_percentile(input_masked_array, percentile):
flat_fill_input = input_masked_array.filled(np.nan).flatten()
df = pd.DataFrame(flat_fill_input)
percentile = df.quantile(percentile/100.0)
return percentile[0]
def save_object(obj, filename):
import pickle
with open(filename, 'wb') as output:
pickle.dump(obj, output)
def downsample(input_array, factor=4):
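    # Averages each non-overlapping 2x2 block (the four shifted slices each
    # contribute a quarter), halving both dimensions; note that the `factor`
    # argument is not used by this implementation.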
output_array = input_array[::2, ::2] / 4 + input_array[1::2, ::2] / 4 + input_array[::2, 1::2] / 4 + input_array[1::2, 1::2] / 4
return output_array
def dilate_boolean_array(input_array, disk_size=3):
selem = disk(disk_size)
dilated = dilation(input_array, selem)
return dilated
def get_resized_array(img, size):
lena = imresize(img, (size, size))
return lena
def interp_and_resize(array, new_length):
orig_y_length, orig_x_length = array.shape
interp_factor_y = new_length / orig_y_length
interp_factor_x = new_length / orig_x_length
y = round(interp_factor_y * orig_y_length)
x = round(interp_factor_x * orig_x_length)
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.mgrid.html
new_indicies = np.mgrid[0:orig_y_length:y * 1j, 0:orig_x_length:x * 1j]
# order=1 indicates bilinear interpolation.
interp_array = ndimage.map_coordinates(array, new_indicies,
order=1, output=array.dtype)
interp_array = interp_array.reshape((y, x))
return interp_array
def parse_mtl(in_file):
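    # A Landsat MTL metadata file is a series of "GROUP = <name>" ...
    # "END_GROUP" sections holding "KEY = VALUE" lines; this returns a nested
    # dict {group: {key: value}} and stops at a blank line or the final END.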
    print(in_file)
    mtl_dict = {}
    with open(in_file, 'r') as f:
        while True:
line = f.readline()
if line.strip() == '' or line.strip() == 'END':
return mtl_dict
elif 'END_GROUP' in line:
pass
elif 'GROUP' in line:
curr_group = line.split('=')[1].strip()
mtl_dict[curr_group] = {}
else:
attr, value = line.split('=')[0].strip(), line.split('=')[1].strip()
mtl_dict[curr_group][attr] = value
|
mit
| 7,201,330,274,549,144,000 | 1,296,873,725,484,850,200 | 33.101266 | 132 | 0.602674 | false |
guludo/ardupilot-1
|
Tools/scripts/frame_sizes.py
|
351
|
1117
|
#!/usr/bin/env python
import re, sys, operator, os
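# The regexes below target compiler-generated .lst assembly listings:
# `code_line` matches an interleaved source line and `frame_line` matches the
# "/* frame size = N */" annotation, so each stack frame size can be
# attributed to the most recently seen source line in process_lst().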
code_line = re.compile(r"^\s*\d+:/")
frame_line = re.compile(r"^\s*\d+\s+/\* frame size = (\d+) \*/")
class frame(object):
def __init__(self, code, frame_size):
self.code = code
self.frame_size = int(frame_size)
frames = []
def process_lst(filename):
'''process one lst file'''
last_code = ''
h = open(filename, mode='r')
for line in h:
if code_line.match(line):
last_code = line.strip()
elif frame_line.match(line):
frames.append(frame(last_code, frame_line.match(line).group(1)))
h.close()
if len(sys.argv) > 1:
dname = sys.argv[1]
else:
dname = '.'
for root, dirs, files in os.walk(dname):
for f in files:
if f.endswith(".lst"):
process_lst(os.path.join(root, f))
sorted_frames = sorted(frames,
key=operator.attrgetter('frame_size'),
reverse=True)
print("FrameSize Code")
for frame in sorted_frames:
if frame.frame_size > 0:
print("%9u %s" % (frame.frame_size, frame.code))
|
gpl-3.0
| -8,480,747,154,990,214,000 | 1,219,545,658,808,217,300 | 24.386364 | 76 | 0.557744 | false |
John-Hart/autorest
|
src/generator/AutoRest.Python.Tests/AcceptanceTests/model_flattening_tests.py
|
6
|
11782
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath, sep, pardir
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir))
sys.path.append(join(root, "src" , "client" , "Python", "msrest"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "ModelFlattening"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
from autorestresourceflatteningtestservice import AutoRestResourceFlatteningTestService
from autorestresourceflatteningtestservice.models import (
FlattenedProduct,
ErrorException,
ResourceCollection,
SimpleProduct,
FlattenParameterGroup)
class ModelFlatteningTests(unittest.TestCase):
def setUp(self):
self.client = AutoRestResourceFlatteningTestService(base_url="http://localhost:3000")
return super(ModelFlatteningTests, self).setUp()
def test_flattening_array(self):
#Array
result = self.client.get_array()
self.assertEqual(3, len(result))
# Resource 1
self.assertEqual("1", result[0].id)
self.assertEqual("OK", result[0].provisioning_state_values)
self.assertEqual("Product1", result[0].pname)
self.assertEqual("Flat", result[0].flattened_product_type)
self.assertEqual("Building 44", result[0].location)
self.assertEqual("Resource1", result[0].name)
self.assertEqual("Succeeded", result[0].provisioning_state)
self.assertEqual("Microsoft.Web/sites", result[0].type)
self.assertEqual("value1", result[0].tags["tag1"])
self.assertEqual("value3", result[0].tags["tag2"])
# Resource 2
self.assertEqual("2", result[1].id)
self.assertEqual("Resource2", result[1].name)
self.assertEqual("Building 44", result[1].location)
# Resource 3
self.assertEqual("3", result[2].id)
self.assertEqual("Resource3", result[2].name)
resourceArray = [
{
'location': "West US",
'tags': {"tag1":"value1", "tag2":"value3"}},
{
'location': "Building 44"}]
self.client.put_array(resourceArray)
def test_flattening_dictionary(self):
#Dictionary
resultDictionary = self.client.get_dictionary()
self.assertEqual(3, len(resultDictionary))
# Resource 1
self.assertEqual("1", resultDictionary["Product1"].id)
self.assertEqual("OK", resultDictionary["Product1"].provisioning_state_values)
self.assertEqual("Product1", resultDictionary["Product1"].pname)
self.assertEqual("Flat", resultDictionary["Product1"].flattened_product_type)
self.assertEqual("Building 44", resultDictionary["Product1"].location)
self.assertEqual("Resource1", resultDictionary["Product1"].name)
self.assertEqual("Succeeded", resultDictionary["Product1"].provisioning_state)
self.assertEqual("Microsoft.Web/sites", resultDictionary["Product1"].type)
self.assertEqual("value1", resultDictionary["Product1"].tags["tag1"])
self.assertEqual("value3", resultDictionary["Product1"].tags["tag2"])
# Resource 2
self.assertEqual("2", resultDictionary["Product2"].id)
self.assertEqual("Resource2", resultDictionary["Product2"].name)
self.assertEqual("Building 44", resultDictionary["Product2"].location)
# Resource 3
self.assertEqual("3", resultDictionary["Product3"].id)
self.assertEqual("Resource3", resultDictionary["Product3"].name)
resourceDictionary = {
"Resource1": {
'location': "West US",
'tags': {"tag1":"value1", "tag2":"value3"},
'pname': "Product1",
'flattened_product_type': "Flat"},
"Resource2": {
'location': "Building 44",
'pname': "Product2",
'flattened_product_type': "Flat"}}
self.client.put_dictionary(resourceDictionary)
def test_flattening_complex_object(self):
#ResourceCollection
resultResource = self.client.get_resource_collection()
#dictionaryofresources
self.assertEqual(3, len(resultResource.dictionaryofresources))
# Resource 1
self.assertEqual("1", resultResource.dictionaryofresources["Product1"].id)
self.assertEqual("OK", resultResource.dictionaryofresources["Product1"].provisioning_state_values)
self.assertEqual("Product1", resultResource.dictionaryofresources["Product1"].pname)
self.assertEqual("Flat", resultResource.dictionaryofresources["Product1"].flattened_product_type)
self.assertEqual("Building 44", resultResource.dictionaryofresources["Product1"].location)
self.assertEqual("Resource1", resultResource.dictionaryofresources["Product1"].name)
self.assertEqual("Succeeded", resultResource.dictionaryofresources["Product1"].provisioning_state)
self.assertEqual("Microsoft.Web/sites", resultResource.dictionaryofresources["Product1"].type)
self.assertEqual("value1", resultResource.dictionaryofresources["Product1"].tags["tag1"])
self.assertEqual("value3", resultResource.dictionaryofresources["Product1"].tags["tag2"])
# Resource 2
self.assertEqual("2", resultResource.dictionaryofresources["Product2"].id)
self.assertEqual("Resource2", resultResource.dictionaryofresources["Product2"].name)
self.assertEqual("Building 44", resultResource.dictionaryofresources["Product2"].location)
# Resource 3
self.assertEqual("3", resultResource.dictionaryofresources["Product3"].id)
self.assertEqual("Resource3", resultResource.dictionaryofresources["Product3"].name)
#arrayofresources
self.assertEqual(3, len(resultResource.arrayofresources))
# Resource 1
self.assertEqual("4", resultResource.arrayofresources[0].id)
self.assertEqual("OK", resultResource.arrayofresources[0].provisioning_state_values)
self.assertEqual("Product4", resultResource.arrayofresources[0].pname)
self.assertEqual("Flat", resultResource.arrayofresources[0].flattened_product_type)
self.assertEqual("Building 44", resultResource.arrayofresources[0].location)
self.assertEqual("Resource4", resultResource.arrayofresources[0].name)
self.assertEqual("Succeeded", resultResource.arrayofresources[0].provisioning_state)
self.assertEqual("Microsoft.Web/sites", resultResource.arrayofresources[0].type)
self.assertEqual("value1", resultResource.arrayofresources[0].tags["tag1"])
self.assertEqual("value3", resultResource.arrayofresources[0].tags["tag2"])
# Resource 2
self.assertEqual("5", resultResource.arrayofresources[1].id)
self.assertEqual("Resource5", resultResource.arrayofresources[1].name)
self.assertEqual("Building 44", resultResource.arrayofresources[1].location)
# Resource 3
self.assertEqual("6", resultResource.arrayofresources[2].id)
self.assertEqual("Resource6", resultResource.arrayofresources[2].name)
#productresource
self.assertEqual("7", resultResource.productresource.id)
self.assertEqual("Resource7", resultResource.productresource.name)
resourceDictionary = {
"Resource1": FlattenedProduct(
location = "West US",
tags = {"tag1":"value1", "tag2":"value3"},
pname = "Product1",
flattened_product_type = "Flat"),
"Resource2": FlattenedProduct(
location = "Building 44",
pname = "Product2",
flattened_product_type = "Flat")}
resourceComplexObject = ResourceCollection(
dictionaryofresources = resourceDictionary,
arrayofresources = [
FlattenedProduct(
location = "West US",
tags = {"tag1":"value1", "tag2":"value3"},
pname = "Product1",
flattened_product_type = "Flat"),
FlattenedProduct(
location = "East US",
pname = "Product2",
flattened_product_type = "Flat")],
productresource = FlattenedProduct(
location = "India",
pname = "Azure",
flattened_product_type = "Flat"))
self.client.put_resource_collection(resourceComplexObject)
def test_model_flattening_simple(self):
        simple_product = SimpleProduct(
product_id = "123",
description = "product description",
max_product_display_name = "max name",
odatavalue = "http://foo",
generic_value = "https://generic"
)
        result = self.client.put_simple_product(simple_product)
        self.assertEqual(result, simple_product)
def test_model_flattening_with_parameter_flattening(self):
simple_product = SimpleProduct(
product_id = "123",
description = "product description",
max_product_display_name = "max name",
odatavalue = "http://foo"
)
result = self.client.post_flattened_simple_product("123", "max name", "product description", None, "http://foo")
self.assertEqual(result, simple_product)
def test_model_flattening_with_grouping(self):
        simple_product = SimpleProduct(
product_id = "123",
description = "product description",
max_product_display_name = "max name",
odatavalue = "http://foo"
)
group = FlattenParameterGroup(
product_id = "123",
description = "product description",
max_product_display_name="max name",
odatavalue="http://foo",
name="groupproduct")
result = self.client.put_simple_product_with_grouping(group)
        self.assertEqual(result, simple_product)
if __name__ == '__main__':
unittest.main()
|
mit
| 7,823,381,392,223,634,000 | -6,277,473,395,717,004,000 | 44.658915 | 120 | 0.642615 | false |
ashishfinoit/django-rest-framework
|
tests/test_permissions.py
|
68
|
18850
|
from __future__ import unicode_literals
import base64
from django.contrib.auth.models import Group, Permission, User
from django.core.urlresolvers import ResolverMatch
from django.db import models
from django.test import TestCase
from django.utils import unittest
from rest_framework import (
HTTP_HEADER_ENCODING, authentication, generics, permissions, serializers,
status
)
from rest_framework.compat import get_model_name, guardian
from rest_framework.filters import DjangoObjectPermissionsFilter
from rest_framework.routers import DefaultRouter
from rest_framework.test import APIRequestFactory
from tests.models import BasicModel
factory = APIRequestFactory()
class BasicSerializer(serializers.ModelSerializer):
class Meta:
model = BasicModel
class RootView(generics.ListCreateAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
class InstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
class GetQuerySetListView(generics.ListCreateAPIView):
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
def get_queryset(self):
return BasicModel.objects.all()
class EmptyListView(generics.ListCreateAPIView):
queryset = BasicModel.objects.none()
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
root_view = RootView.as_view()
api_root_view = DefaultRouter().get_api_root_view()
instance_view = InstanceView.as_view()
get_queryset_list_view = GetQuerySetListView.as_view()
empty_list_view = EmptyListView.as_view()
def basic_auth_header(username, password):
credentials = ('%s:%s' % (username, password))
base64_credentials = base64.b64encode(credentials.encode(HTTP_HEADER_ENCODING)).decode(HTTP_HEADER_ENCODING)
return 'Basic %s' % base64_credentials
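# e.g. basic_auth_header('user', 'pass') returns 'Basic dXNlcjpwYXNz', i.e. the
# base64 of 'user:pass', matching the HTTP Basic Authorization header a client
# would send.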
class ModelPermissionsIntegrationTests(TestCase):
def setUp(self):
User.objects.create_user('disallowed', '[email protected]', 'password')
user = User.objects.create_user('permitted', '[email protected]', 'password')
user.user_permissions = [
Permission.objects.get(codename='add_basicmodel'),
Permission.objects.get(codename='change_basicmodel'),
Permission.objects.get(codename='delete_basicmodel')
]
user = User.objects.create_user('updateonly', '[email protected]', 'password')
user.user_permissions = [
Permission.objects.get(codename='change_basicmodel'),
]
self.permitted_credentials = basic_auth_header('permitted', 'password')
self.disallowed_credentials = basic_auth_header('disallowed', 'password')
self.updateonly_credentials = basic_auth_header('updateonly', 'password')
BasicModel(text='foo').save()
def test_has_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_api_root_view_discard_default_django_model_permission(self):
"""
We check that DEFAULT_PERMISSION_CLASSES can
apply to APIRoot view. More specifically we check expected behavior of
``_ignore_model_permissions`` attribute support.
"""
request = factory.get('/', format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
request.resolver_match = ResolverMatch('get', (), {})
response = api_root_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_queryset_has_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = get_queryset_list_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_has_put_permissions(self):
request = factory.put('/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_has_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_does_not_have_create_permissions(self):
request = factory.post('/', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = root_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_put_permissions(self):
request = factory.put('/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_have_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.disallowed_credentials)
response = instance_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_options_permitted(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.permitted_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['POST'])
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.permitted_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['PUT'])
def test_options_disallowed(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.disallowed_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.disallowed_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
def test_options_updateonly(self):
request = factory.options(
'/',
HTTP_AUTHORIZATION=self.updateonly_credentials
)
response = root_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotIn('actions', response.data)
request = factory.options(
'/1',
HTTP_AUTHORIZATION=self.updateonly_credentials
)
response = instance_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('actions', response.data)
self.assertEqual(list(response.data['actions'].keys()), ['PUT'])
def test_empty_view_does_not_assert(self):
request = factory.get('/1', HTTP_AUTHORIZATION=self.permitted_credentials)
response = empty_list_view(request, pk=1)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class BasicPermModel(models.Model):
text = models.CharField(max_length=100)
class Meta:
app_label = 'tests'
permissions = (
('view_basicpermmodel', 'Can view basic perm model'),
# add, change, delete built in to django
)
class BasicPermSerializer(serializers.ModelSerializer):
class Meta:
model = BasicPermModel
# Custom object-level permission, that includes 'view' permissions
class ViewObjectPermissions(permissions.DjangoObjectPermissions):
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': ['%(app_label)s.view_%(model_name)s'],
'HEAD': ['%(app_label)s.view_%(model_name)s'],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
class ObjectPermissionInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicPermModel.objects.all()
serializer_class = BasicPermSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [ViewObjectPermissions]
object_permissions_view = ObjectPermissionInstanceView.as_view()
class ObjectPermissionListView(generics.ListAPIView):
queryset = BasicPermModel.objects.all()
serializer_class = BasicPermSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [ViewObjectPermissions]
object_permissions_list_view = ObjectPermissionListView.as_view()
class GetQuerysetObjectPermissionInstanceView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = BasicPermSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [ViewObjectPermissions]
def get_queryset(self):
return BasicPermModel.objects.all()
get_queryset_object_permissions_view = GetQuerysetObjectPermissionInstanceView.as_view()
@unittest.skipUnless(guardian, 'django-guardian not installed')
class ObjectPermissionsIntegrationTests(TestCase):
"""
Integration tests for the object level permissions API.
"""
def setUp(self):
from guardian.shortcuts import assign_perm
# create users
create = User.objects.create_user
users = {
'fullaccess': create('fullaccess', '[email protected]', 'password'),
'readonly': create('readonly', '[email protected]', 'password'),
'writeonly': create('writeonly', '[email protected]', 'password'),
'deleteonly': create('deleteonly', '[email protected]', 'password'),
}
# give everyone model level permissions, as we are not testing those
everyone = Group.objects.create(name='everyone')
model_name = get_model_name(BasicPermModel)
app_label = BasicPermModel._meta.app_label
f = '{0}_{1}'.format
perms = {
'view': f('view', model_name),
'change': f('change', model_name),
'delete': f('delete', model_name)
}
for perm in perms.values():
perm = '{0}.{1}'.format(app_label, perm)
assign_perm(perm, everyone)
everyone.user_set.add(*users.values())
# appropriate object level permissions
readers = Group.objects.create(name='readers')
writers = Group.objects.create(name='writers')
deleters = Group.objects.create(name='deleters')
model = BasicPermModel.objects.create(text='foo')
assign_perm(perms['view'], readers, model)
assign_perm(perms['change'], writers, model)
assign_perm(perms['delete'], deleters, model)
readers.user_set.add(users['fullaccess'], users['readonly'])
writers.user_set.add(users['fullaccess'], users['writeonly'])
deleters.user_set.add(users['fullaccess'], users['deleteonly'])
self.credentials = {}
for user in users.values():
self.credentials[user.username] = basic_auth_header(user.username, 'password')
# Delete
def test_can_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.credentials['deleteonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_cannot_delete_permissions(self):
request = factory.delete('/1', HTTP_AUTHORIZATION=self.credentials['readonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# Update
def test_can_update_permissions(self):
request = factory.patch(
'/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.credentials['writeonly']
)
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('text'), 'foobar')
def test_cannot_update_permissions(self):
request = factory.patch(
'/1', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.credentials['deleteonly']
)
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_cannot_update_permissions_non_existing(self):
request = factory.patch(
'/999', {'text': 'foobar'}, format='json',
HTTP_AUTHORIZATION=self.credentials['deleteonly']
)
response = object_permissions_view(request, pk='999')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# Read
def test_can_read_permissions(self):
request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['readonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_cannot_read_permissions(self):
request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['writeonly'])
response = object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_can_read_get_queryset_permissions(self):
"""
        Same as ``test_can_read_permissions`` but with a view
        that relies on ``.get_queryset()`` instead of ``.queryset``.
"""
request = factory.get('/1', HTTP_AUTHORIZATION=self.credentials['readonly'])
response = get_queryset_object_permissions_view(request, pk='1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Read list
def test_can_read_list_permissions(self):
request = factory.get('/', HTTP_AUTHORIZATION=self.credentials['readonly'])
object_permissions_list_view.cls.filter_backends = (DjangoObjectPermissionsFilter,)
response = object_permissions_list_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data[0].get('id'), 1)
def test_cannot_read_list_permissions(self):
request = factory.get('/', HTTP_AUTHORIZATION=self.credentials['writeonly'])
object_permissions_list_view.cls.filter_backends = (DjangoObjectPermissionsFilter,)
response = object_permissions_list_view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertListEqual(response.data, [])
class BasicPerm(permissions.BasePermission):
def has_permission(self, request, view):
return False
class BasicPermWithDetail(permissions.BasePermission):
message = 'Custom: You cannot access this resource'
def has_permission(self, request, view):
return False
class BasicObjectPerm(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return False
class BasicObjectPermWithDetail(permissions.BasePermission):
message = 'Custom: You cannot access this resource'
def has_object_permission(self, request, view, obj):
return False
class PermissionInstanceView(generics.RetrieveUpdateDestroyAPIView):
queryset = BasicModel.objects.all()
serializer_class = BasicSerializer
class DeniedView(PermissionInstanceView):
permission_classes = (BasicPerm,)
class DeniedViewWithDetail(PermissionInstanceView):
permission_classes = (BasicPermWithDetail,)
class DeniedObjectView(PermissionInstanceView):
permission_classes = (BasicObjectPerm,)
class DeniedObjectViewWithDetail(PermissionInstanceView):
permission_classes = (BasicObjectPermWithDetail,)
denied_view = DeniedView.as_view()
denied_view_with_detail = DeniedViewWithDetail.as_view()
denied_object_view = DeniedObjectView.as_view()
denied_object_view_with_detail = DeniedObjectViewWithDetail.as_view()
class CustomPermissionsTests(TestCase):
def setUp(self):
BasicModel(text='foo').save()
User.objects.create_user('username', '[email protected]', 'password')
credentials = basic_auth_header('username', 'password')
self.request = factory.get('/1', format='json', HTTP_AUTHORIZATION=credentials)
self.custom_message = 'Custom: You cannot access this resource'
def test_permission_denied(self):
response = denied_view(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotEqual(detail, self.custom_message)
def test_permission_denied_with_custom_detail(self):
response = denied_view_with_detail(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(detail, self.custom_message)
def test_permission_denied_for_object(self):
response = denied_object_view(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotEqual(detail, self.custom_message)
def test_permission_denied_for_object_with_custom_detail(self):
response = denied_object_view_with_detail(self.request, pk=1)
detail = response.data.get('detail')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(detail, self.custom_message)
|
bsd-2-clause
| -3,318,475,768,790,521,300 | -5,420,724,654,986,284,000 | 39.106383 | 112 | 0.676499 | false |
taigaio/taiga-back
|
taiga/projects/settings/migrations/0001_initial.py
|
1
|
1897
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-09-24 11:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import taiga.projects.settings.choices
class Migration(migrations.Migration):
initial = True
dependencies = [
('projects', '0061_auto_20180918_1355'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProjectSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('homepage', models.SmallIntegerField(choices=[(taiga.projects.settings.choices.Section(1), 'Timeline'), (taiga.projects.settings.choices.Section(2), 'Epics'), (taiga.projects.settings.choices.Section(3), 'Backlog'), (taiga.projects.settings.choices.Section(4), 'Kanban'), (taiga.projects.settings.choices.Section(5), 'Issues'), (taiga.projects.settings.choices.Section(6), 'TeamWiki')], default=taiga.projects.settings.choices.Section(1))),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('modified_at', models.DateTimeField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_project_settings', to='projects.Project')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_project_settings', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['created_at'],
},
),
migrations.AlterUniqueTogether(
name='userprojectsettings',
unique_together=set([('project', 'user')]),
),
]
|
agpl-3.0
| 3,027,821,932,241,282,600 | -742,281,520,489,120,300 | 46.425 | 457 | 0.651555 | false |
mogers/buck
|
third-party/nailgun/pynailgun/ng.py
|
17
|
19064
|
#!/usr/bin/env python
#
# Copyright 2004-2015, Martian Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import platform
import optparse
import os
import os.path
import Queue
import select
import socket
import struct
import sys
from threading import Condition, Event, Thread
# @author <a href="http://www.martiansoftware.com/contact.html">Marty Lamb</a>
# @author Pete Kirkham (Win32 port)
# @author Ben Hamilton (Python port)
#
# Please try to keep this working on Python 2.6.
NAILGUN_VERSION = '0.9.0'
BUFSIZE = 2048
NAILGUN_PORT_DEFAULT = 2113
CHUNK_HEADER_LEN = 5
CHUNKTYPE_STDIN = '0'
CHUNKTYPE_STDOUT = '1'
CHUNKTYPE_STDERR = '2'
CHUNKTYPE_STDIN_EOF = '.'
CHUNKTYPE_ARG = 'A'
CHUNKTYPE_LONGARG = 'L'
CHUNKTYPE_ENV = 'E'
CHUNKTYPE_DIR = 'D'
CHUNKTYPE_CMD = 'C'
CHUNKTYPE_EXIT = 'X'
CHUNKTYPE_SENDINPUT = 'S'
CHUNKTYPE_HEARTBEAT = 'H'
NSEC_PER_SEC = 1000000000
# 500 ms heartbeat timeout
HEARTBEAT_TIMEOUT_NANOS = NSEC_PER_SEC / 2
HEARTBEAT_TIMEOUT_SECS = HEARTBEAT_TIMEOUT_NANOS / (NSEC_PER_SEC * 1.0)
# We need to support Python 2.6 hosts which lack memoryview().
import __builtin__
HAS_MEMORYVIEW = 'memoryview' in dir(__builtin__)
EVENT_STDIN_CHUNK = 0
EVENT_STDIN_CLOSED = 1
EVENT_STDIN_EXCEPTION = 2
class NailgunException(Exception):
SOCKET_FAILED = 231
CONNECT_FAILED = 230
UNEXPECTED_CHUNKTYPE = 229
CONNECTION_BROKEN = 227
def __init__(self, message, code):
self.message = message
self.code = code
def __str__(self):
return self.message
class NailgunConnection(object):
'''Stateful object holding the connection to the Nailgun server.'''
def __init__(
self,
server_name,
server_port=None,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
cwd=None):
self.socket = make_nailgun_socket(server_name, server_port, cwd)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.recv_flags = 0
self.send_flags = 0
if hasattr(socket, 'MSG_WAITALL'):
self.recv_flags |= socket.MSG_WAITALL
if hasattr(socket, 'MSG_NOSIGNAL'):
self.send_flags |= socket.MSG_NOSIGNAL
self.header_buf = ctypes.create_string_buffer(CHUNK_HEADER_LEN)
self.buf = ctypes.create_string_buffer(BUFSIZE)
self.ready_to_send_condition = Condition()
self.sendtime_nanos = 0
self.exit_code = None
self.stdin_queue = Queue.Queue()
self.shutdown_event = Event()
self.stdin_thread = Thread(
target=stdin_thread_main,
args=(self.stdin, self.stdin_queue, self.shutdown_event, self.ready_to_send_condition))
self.stdin_thread.daemon = True
def send_command(
self,
cmd,
cmd_args=[],
filearg=None,
env=os.environ,
cwd=os.getcwd()):
'''
Sends the command and environment to the nailgun server, then loops forever
reading the response until the server sends an exit chunk.
Returns the exit value, or raises NailgunException on error.
'''
try:
return self._send_command_and_read_response(cmd, cmd_args, filearg, env, cwd)
except socket.error as e:
raise NailgunException(
'Server disconnected unexpectedly: {0}'.format(e),
NailgunException.CONNECTION_BROKEN)
def _send_command_and_read_response(self, cmd, cmd_args, filearg, env, cwd):
if filearg:
send_file_arg(filearg, self)
for cmd_arg in cmd_args:
send_chunk(cmd_arg, CHUNKTYPE_ARG, self)
send_env_var('NAILGUN_FILESEPARATOR', os.sep, self)
send_env_var('NAILGUN_PATHSEPARATOR', os.pathsep, self)
send_tty_format(self.stdin, self)
send_tty_format(self.stdout, self)
send_tty_format(self.stderr, self)
for k, v in env.iteritems():
send_env_var(k, v, self)
send_chunk(cwd, CHUNKTYPE_DIR, self)
send_chunk(cmd, CHUNKTYPE_CMD, self)
self.stdin_thread.start()
while self.exit_code is None:
self._process_next_chunk()
self._check_stdin_queue()
self.shutdown_event.set()
with self.ready_to_send_condition:
self.ready_to_send_condition.notify()
# We can't really join on self.stdin_thread, since
# there's no way to interrupt its call to sys.stdin.readline.
return self.exit_code
def _process_next_chunk(self):
'''
Processes the next chunk from the nailgun server.
'''
select_list = set([self.socket])
readable, _, exceptional = select.select(
select_list, [], select_list, HEARTBEAT_TIMEOUT_SECS)
if self.socket in readable:
process_nailgun_stream(self)
now = monotonic_time_nanos()
if now - self.sendtime_nanos > HEARTBEAT_TIMEOUT_NANOS:
send_heartbeat(self)
if self.socket in exceptional:
raise NailgunException(
'Server disconnected in select',
NailgunException.CONNECTION_BROKEN)
def _check_stdin_queue(self):
'''Check if the stdin thread has read anything.'''
while not self.stdin_queue.empty():
try:
(event_type, event_arg) = self.stdin_queue.get_nowait()
if event_type == EVENT_STDIN_CHUNK:
send_chunk(event_arg, CHUNKTYPE_STDIN, self)
elif event_type == EVENT_STDIN_CLOSED:
send_chunk('', CHUNKTYPE_STDIN_EOF, self)
elif event_type == EVENT_STDIN_EXCEPTION:
raise event_arg
except Queue.Empty:
break
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.socket.close()
except socket.error:
pass
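# A minimal usage sketch (the address, port, and command below are illustrative;
# main() at the bottom of this file does the same thing with argument parsing):
#   with NailgunConnection('127.0.0.1', server_port=2113) as conn:
#       exit_code = conn.send_command('ng-version')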
def monotonic_time_nanos():
'''Returns a monotonically-increasing timestamp value in nanoseconds.
The epoch of the return value is undefined. To use this, you must call
it more than once and calculate the delta between two calls.
'''
# This function should be overwritten below on supported platforms.
raise Exception('Unsupported platform: ' + platform.system())
if platform.system() == 'Linux':
# From <linux/time.h>, available since 2.6.28 (released 24-Dec-2008).
CLOCK_MONOTONIC_RAW = 4
librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
class struct_timespec(ctypes.Structure):
_fields_ = [('tv_sec', ctypes.c_long), ('tv_nsec', ctypes.c_long)]
clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(struct_timespec)]
def _monotonic_time_nanos_linux():
t = struct_timespec()
clock_gettime(CLOCK_MONOTONIC_RAW, ctypes.byref(t))
return t.tv_sec * NSEC_PER_SEC + t.tv_nsec
monotonic_time_nanos = _monotonic_time_nanos_linux
elif platform.system() == 'Darwin':
# From <mach/mach_time.h>
KERN_SUCCESS = 0
libSystem = ctypes.CDLL('/usr/lib/libSystem.dylib', use_errno=True)
mach_timebase_info = libSystem.mach_timebase_info
class struct_mach_timebase_info(ctypes.Structure):
_fields_ = [('numer', ctypes.c_uint32), ('denom', ctypes.c_uint32)]
mach_timebase_info.argtypes = [ctypes.POINTER(struct_mach_timebase_info)]
mach_ti = struct_mach_timebase_info()
ret = mach_timebase_info(ctypes.byref(mach_ti))
if ret != KERN_SUCCESS:
raise Exception('Could not get mach_timebase_info, error: ' + str(ret))
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
def _monotonic_time_nanos_darwin():
return (mach_absolute_time() * mach_ti.numer) / mach_ti.denom
monotonic_time_nanos = _monotonic_time_nanos_darwin
elif platform.system() == 'Windows':
# From <Winbase.h>
perf_frequency = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_windows():
perf_counter = ctypes.c_uint64()
ctypes.windll.kernel32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_windows
elif sys.platform == 'cygwin':
k32 = ctypes.CDLL('Kernel32', use_errno=True)
perf_frequency = ctypes.c_uint64()
k32.QueryPerformanceFrequency(ctypes.byref(perf_frequency))
def _monotonic_time_nanos_cygwin():
perf_counter = ctypes.c_uint64()
k32.QueryPerformanceCounter(ctypes.byref(perf_counter))
return perf_counter.value * NSEC_PER_SEC / perf_frequency.value
monotonic_time_nanos = _monotonic_time_nanos_cygwin
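# A minimal usage sketch: the epoch of monotonic_time_nanos() is undefined, so
# only the delta between two calls is meaningful, e.g.
#   start = monotonic_time_nanos()
#   ...  # do some work
#   elapsed_nanos = monotonic_time_nanos() - start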
def send_chunk(buf, chunk_type, nailgun_connection):
'''
Sends a chunk noting the specified payload size and chunk type.
'''
struct.pack_into('>ic', nailgun_connection.header_buf, 0, len(buf), chunk_type)
nailgun_connection.sendtime_nanos = monotonic_time_nanos()
nailgun_connection.socket.sendall(
nailgun_connection.header_buf.raw,
nailgun_connection.send_flags)
nailgun_connection.socket.sendall(buf, nailgun_connection.send_flags)
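# For reference, each chunk on the wire is a 5-byte header followed by the raw
# payload: a big-endian 4-byte payload length and a 1-byte chunk type. A 3-byte
# argument chunk could be assembled (illustratively) as
#   struct.pack('>ic', 3, CHUNKTYPE_ARG) + 'foo'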
def send_env_var(name, value, nailgun_connection):
'''
Sends an environment variable in KEY=VALUE format.
'''
send_chunk('='.join((name, value)), CHUNKTYPE_ENV, nailgun_connection)
def send_tty_format(f, nailgun_connection):
'''
Sends a NAILGUN_TTY_# environment variable.
'''
if not f or not hasattr(f, 'fileno'):
return
fileno = f.fileno()
isatty = os.isatty(fileno)
send_env_var('NAILGUN_TTY_' + str(fileno), str(int(isatty)), nailgun_connection)
def send_file_arg(filename, nailgun_connection):
'''
Sends the contents of a file to the server.
'''
with open(filename) as f:
while True:
num_bytes = f.readinto(nailgun_connection.buf)
if not num_bytes:
break
send_chunk(
nailgun_connection.buf.raw[:num_bytes], CHUNKTYPE_LONGARG, nailgun_connection)
def recv_to_fd(dest_file, num_bytes, nailgun_connection):
'''
Receives num_bytes bytes from the nailgun socket and copies them to the specified file
object. Used to route data to stdout or stderr on the client.
'''
bytes_read = 0
while bytes_read < num_bytes:
bytes_to_read = min(len(nailgun_connection.buf), num_bytes - bytes_read)
bytes_received = nailgun_connection.socket.recv_into(
nailgun_connection.buf,
bytes_to_read,
nailgun_connection.recv_flags)
if dest_file:
dest_file.write(nailgun_connection.buf[:bytes_received])
bytes_read += bytes_received
def recv_to_buffer(num_bytes, buf, nailgun_connection):
'''
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
'''
# We'd love to use socket.recv_into() everywhere to avoid
# unnecessary copies, but we need to support Python 2.6. The
# only way to provide an offset to recv_into() is to use
# memoryview(), which doesn't exist until Python 2.7.
if HAS_MEMORYVIEW:
recv_into_memoryview(num_bytes, memoryview(buf), nailgun_connection)
else:
recv_to_buffer_with_copy(num_bytes, buf, nailgun_connection)
def recv_into_memoryview(num_bytes, buf_view, nailgun_connection):
'''
Receives num_bytes from the nailgun socket and writes them into the specified memoryview
to avoid an extra copy.
'''
bytes_read = 0
while bytes_read < num_bytes:
bytes_received = nailgun_connection.socket.recv_into(
buf_view[bytes_read:],
num_bytes - bytes_read,
nailgun_connection.recv_flags)
if not bytes_received:
raise NailgunException(
'Server unexpectedly disconnected in recv_into()',
NailgunException.CONNECTION_BROKEN)
bytes_read += bytes_received
def recv_to_buffer_with_copy(num_bytes, buf, nailgun_connection):
'''
Receives num_bytes from the nailgun socket and writes them into the specified buffer.
'''
bytes_read = 0
while bytes_read < num_bytes:
recv_buf = nailgun_connection.socket.recv(
num_bytes - bytes_read,
nailgun_connection.recv_flags)
if not len(recv_buf):
raise NailgunException(
'Server unexpectedly disconnected in recv()',
NailgunException.CONNECTION_BROKEN)
buf[bytes_read:bytes_read + len(recv_buf)] = recv_buf
bytes_read += len(recv_buf)
def process_exit(exit_len, nailgun_connection):
'''
Receives an exit code from the nailgun server and sets nailgun_connection.exit_code
to indicate the client should exit.
'''
num_bytes = min(len(nailgun_connection.buf), exit_len)
recv_to_buffer(num_bytes, nailgun_connection.buf, nailgun_connection)
nailgun_connection.exit_code = int(''.join(nailgun_connection.buf.raw[:num_bytes]))
def send_heartbeat(nailgun_connection):
'''
Sends a heartbeat to the nailgun server to indicate the client is still alive.
'''
try:
send_chunk('', CHUNKTYPE_HEARTBEAT, nailgun_connection)
except IOError as e:
# The Nailgun C client ignores SIGPIPE etc. on heartbeats,
# so we do too. (This typically happens when shutting down.)
pass
def stdin_thread_main(stdin, queue, shutdown_event, ready_to_send_condition):
if not stdin:
return
try:
while not shutdown_event.is_set():
with ready_to_send_condition:
ready_to_send_condition.wait()
if shutdown_event.is_set():
break
# This is a bit cheesy, but there isn't a great way to
# portably tell Python to read as much as possible on
# stdin without blocking.
buf = stdin.readline()
if buf == '':
queue.put((EVENT_STDIN_CLOSED, None))
break
queue.put((EVENT_STDIN_CHUNK, buf))
except Exception as e:
queue.put((EVENT_STDIN_EXCEPTION, e))
def process_nailgun_stream(nailgun_connection):
'''
Processes a single chunk from the nailgun server.
'''
recv_to_buffer(
len(nailgun_connection.header_buf), nailgun_connection.header_buf, nailgun_connection)
(chunk_len, chunk_type) = struct.unpack_from('>ic', nailgun_connection.header_buf.raw)
if chunk_type == CHUNKTYPE_STDOUT:
recv_to_fd(nailgun_connection.stdout, chunk_len, nailgun_connection)
elif chunk_type == CHUNKTYPE_STDERR:
recv_to_fd(nailgun_connection.stderr, chunk_len, nailgun_connection)
elif chunk_type == CHUNKTYPE_EXIT:
process_exit(chunk_len, nailgun_connection)
elif chunk_type == CHUNKTYPE_SENDINPUT:
with nailgun_connection.ready_to_send_condition:
# Wake up the stdin thread and tell it to read as much data as possible.
nailgun_connection.ready_to_send_condition.notify()
else:
raise NailgunException(
'Unexpected chunk type: {0}'.format(chunk_type),
NailgunException.UNEXPECTED_CHUNKTYPE)
def make_nailgun_socket(nailgun_server, nailgun_port=None, cwd=None):
'''
Creates and returns a socket connection to the nailgun server.
'''
s = None
if nailgun_server.startswith('local:'):
try:
s = socket.socket(socket.AF_UNIX)
except socket.error as msg:
raise NailgunException(
'Could not create local socket connection to server: {0}'.format(msg),
NailgunException.SOCKET_FAILED)
socket_addr = nailgun_server[6:]
prev_cwd = os.getcwd()
try:
if cwd is not None:
os.chdir(cwd)
s.connect(socket_addr)
except socket.error as msg:
raise NailgunException(
'Could not connect to local server at {0}: {1}'.format(socket_addr, msg),
NailgunException.CONNECT_FAILED)
finally:
if cwd is not None:
os.chdir(prev_cwd)
else:
socket_addr = nailgun_server
socket_family = socket.AF_UNSPEC
for (af, socktype, proto, _, sa) in socket.getaddrinfo(
nailgun_server, nailgun_port, socket.AF_UNSPEC, socket.SOCK_STREAM):
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.connect(sa)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
raise NailgunException(
'Could not connect to server {0}:{1}'.format(nailgun_server, nailgun_port),
NailgunException.CONNECT_FAILED)
return s
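# The server address accepted above is either 'local:<path>' for a Unix domain
# socket (the 'local:' prefix is stripped before connecting) or a hostname/IP
# resolved together with nailgun_port, e.g. (the port shown is illustrative):
#   make_nailgun_socket('local:/tmp/nailgun.sock')
#   make_nailgun_socket('127.0.0.1', 2113)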
def main():
'''
Main entry point to the nailgun client.
'''
default_nailgun_server = os.environ.get('NAILGUN_SERVER', '127.0.0.1')
default_nailgun_port = int(os.environ.get('NAILGUN_PORT', NAILGUN_PORT_DEFAULT))
parser = optparse.OptionParser(usage='%prog [options] cmd arg1 arg2 ...')
parser.add_option('--nailgun-server', default=default_nailgun_server)
parser.add_option('--nailgun-port', type='int', default=default_nailgun_port)
parser.add_option('--nailgun-filearg')
parser.add_option('--nailgun-showversion', action='store_true')
parser.add_option('--nailgun-help', action='help')
(options, args) = parser.parse_args()
if options.nailgun_showversion:
print 'NailGun client version ' + NAILGUN_VERSION
if len(args):
cmd = args.pop(0)
else:
cmd = os.path.basename(sys.argv[0])
# Pass any remaining command line arguments to the server.
cmd_args = args
try:
with NailgunConnection(
options.nailgun_server,
server_port=options.nailgun_port) as c:
exit_code = c.send_command(cmd, cmd_args, options.nailgun_filearg)
sys.exit(exit_code)
except NailgunException as e:
print >>sys.stderr, str(e)
sys.exit(e.code)
except KeyboardInterrupt as e:
pass
if __name__ == '__main__':
main()
|
apache-2.0
| 6,710,399,040,660,500,000 | 8,320,504,744,057,669,000 | 34.834586 | 99 | 0.632921 | false |
chooyan-eng/ChooyanHttp
|
http_client.py
|
1
|
2148
|
import socket
class ChooyanHttpClient:
def request(host, port=80):
response = ChooyanResponse()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
request_str = 'GET / HTTP/1.1\r\nHost: %s\r\n\r\n' % (host)
s.send(request_str.encode('utf-8'))
headerbuffer = ResponseBuffer()
allbuffer = ResponseBuffer()
while True:
chunk = s.recv(4096)
allbuffer.append(chunk)
if response.content_length == -1:
headerbuffer.append(chunk)
response.content_length = ChooyanHttpClient.parse_contentlength(headerbuffer)
else:
if len(allbuffer.get_body()) >= response.content_length:
break
response.body = allbuffer.get_body()
response.responce_code = 200
s.close()
return response
def parse_contentlength(buffer):
while True:
line = buffer.read_line()
if line is None:
return -1
if line.startswith('Content-Length'):
return int(line.replace('Content-Length: ', ''))
class ChooyanResponse:
def __init__(self):
self.responce_code = None
self.body = None
self.content_length = -1
class ResponseBuffer:
def __init__(self):
self.data = b''
def append(self, data):
self.data += data
def read_line(self):
if self.data == b'':
return None
end_index = self.data.find(b'\r\n')
if end_index == -1:
ret = self.data
self.data = b''
else:
ret = self.data[:end_index]
self.data = self.data[end_index + len(b'\r\n'):]
return ret.decode('utf-8')
def get_body(self):
body_index = self.data.find(b'\r\n\r\n')
if body_index == -1:
return None
else:
return self.data[body_index + len(b'\r\n\r\n'):]
if __name__ == '__main__':
resp = ChooyanHttpClient.request('www.hasam.jp', 80)
if resp.responce_code == 200:
print(resp.body)
|
apache-2.0
| -5,118,249,381,439,972,000 | -7,359,427,892,084,212,000 | 26.189873 | 93 | 0.53352 | false |
nttks/jenkins-test
|
cms/djangoapps/contentstore/views/entrance_exam.py
|
7
|
8293
|
"""
Entrance Exams view module -- handles all requests related to entrance exam management via Studio
Intended to be utilized as an AJAX callback handler, versus a proper view/screen
"""
import json
import logging
from django.contrib.auth.decorators import login_required
from django_future.csrf import ensure_csrf_cookie
from django.http import HttpResponse
from django.test import RequestFactory
from contentstore.views.item import create_item, delete_item
from milestones import api as milestones_api
from models.settings.course_metadata import CourseMetadata
from opaque_keys.edx.keys import CourseKey, UsageKey
from opaque_keys import InvalidKeyError
from student.auth import has_course_author_access
from util.milestones_helpers import generate_milestone_namespace, NAMESPACE_CHOICES
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from django.conf import settings
__all__ = ['entrance_exam', ]
log = logging.getLogger(__name__)
@login_required
@ensure_csrf_cookie
def entrance_exam(request, course_key_string):
"""
The restful handler for entrance exams.
It allows retrieval of the entrance exam module metadata, creation of a new
entrance exam for a course, and removal of an existing entrance exam.
GET
Retrieves the entrance exam module (metadata) for the specified course
POST
Adds an entrance exam module to the specified course.
DELETE
Removes the entrance exam from the course
"""
course_key = CourseKey.from_string(course_key_string)
# Deny access if the user is valid, but they lack the proper object access privileges
if not has_course_author_access(request.user, course_key):
return HttpResponse(status=403)
# Retrieve the entrance exam module for the specified course (returns 404 if none found)
if request.method == 'GET':
return _get_entrance_exam(request, course_key)
# Create a new entrance exam for the specified course (returns 201 if created)
elif request.method == 'POST':
response_format = request.REQUEST.get('format', 'html')
http_accept = request.META.get('HTTP_ACCEPT', '')
if response_format == 'json' or 'application/json' in http_accept:
ee_min_score = request.POST.get('entrance_exam_minimum_score_pct', None)
# if request contains empty value or none then save the default one.
entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
if ee_min_score != '' and ee_min_score is not None:
entrance_exam_minimum_score_pct = float(ee_min_score)
return create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
return HttpResponse(status=400)
# Remove the entrance exam module for the specified course (returns 204 regardless of existence)
elif request.method == 'DELETE':
return delete_entrance_exam(request, course_key)
# No other HTTP verbs/methods are supported at this time
else:
return HttpResponse(status=405)
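# For illustration, a JSON-format POST to the entrance_exam handler above with
#   entrance_exam_minimum_score_pct=60
# creates an entrance exam gated at roughly 60%; omitting the field falls back
# to settings.ENTRANCE_EXAM_MIN_SCORE_PCT.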
def create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct):
"""
api method to create an entrance exam.
First clean out any old entrance exams.
"""
_delete_entrance_exam(request, course_key)
return _create_entrance_exam(
request=request,
course_key=course_key,
entrance_exam_minimum_score_pct=entrance_exam_minimum_score_pct
)
def _create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct=None):
"""
Internal workflow operation to create an entrance exam
"""
# Provide a default value for the minimum score percent if nothing specified
if entrance_exam_minimum_score_pct is None:
entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
# Confirm the course exists
course = modulestore().get_course(course_key)
if course is None:
return HttpResponse(status=400)
# Create the entrance exam item (currently it's just a chapter)
payload = {
'category': "chapter",
'display_name': "Entrance Exam",
'parent_locator': unicode(course.location),
'is_entrance_exam': True,
'in_entrance_exam': True,
}
factory = RequestFactory()
internal_request = factory.post('/', json.dumps(payload), content_type="application/json")
internal_request.user = request.user
created_item = json.loads(create_item(internal_request).content)
# Set the entrance exam metadata flags for this course
# Reload the course so we don't overwrite the new child reference
course = modulestore().get_course(course_key)
metadata = {
'entrance_exam_enabled': True,
'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct / 100,
'entrance_exam_id': created_item['locator'],
}
CourseMetadata.update_from_dict(metadata, course, request.user)
# Add an entrance exam milestone if one does not already exist
milestone_namespace = generate_milestone_namespace(
NAMESPACE_CHOICES['ENTRANCE_EXAM'],
course_key
)
milestones = milestones_api.get_milestones(milestone_namespace)
if len(milestones):
milestone = milestones[0]
else:
description = 'Autogenerated during {} entrance exam creation.'.format(unicode(course.id))
milestone = milestones_api.add_milestone({
'name': 'Completed Course Entrance Exam',
'namespace': milestone_namespace,
'description': description
})
relationship_types = milestones_api.get_milestone_relationship_types()
milestones_api.add_course_milestone(
unicode(course.id),
relationship_types['REQUIRES'],
milestone
)
milestones_api.add_course_content_milestone(
unicode(course.id),
created_item['locator'],
relationship_types['FULFILLS'],
milestone
)
return HttpResponse(status=201)
def _get_entrance_exam(request, course_key): # pylint: disable=W0613
"""
Internal workflow operation to retrieve an entrance exam
"""
course = modulestore().get_course(course_key)
if course is None:
return HttpResponse(status=400)
if not getattr(course, 'entrance_exam_id'):
return HttpResponse(status=404)
try:
exam_key = UsageKey.from_string(course.entrance_exam_id)
except InvalidKeyError:
return HttpResponse(status=404)
try:
exam_descriptor = modulestore().get_item(exam_key)
return HttpResponse(
_serialize_entrance_exam(exam_descriptor),
status=200, mimetype='application/json')
except ItemNotFoundError:
return HttpResponse(status=404)
def delete_entrance_exam(request, course_key):
"""
api method to delete an entrance exam
"""
return _delete_entrance_exam(request=request, course_key=course_key)
def _delete_entrance_exam(request, course_key):
"""
Internal workflow operation to remove an entrance exam
"""
store = modulestore()
course = store.get_course(course_key)
if course is None:
return HttpResponse(status=400)
course_children = store.get_items(
course_key,
qualifiers={'category': 'chapter'}
)
for course_child in course_children:
if course_child.is_entrance_exam:
delete_item(request, course_child.scope_ids.usage_id)
milestones_api.remove_content_references(unicode(course_child.scope_ids.usage_id))
# Reset the entrance exam flags on the course
# Reload the course so we have the latest state
course = store.get_course(course_key)
if getattr(course, 'entrance_exam_id'):
metadata = {
'entrance_exam_enabled': False,
'entrance_exam_minimum_score_pct': None,
'entrance_exam_id': None,
}
CourseMetadata.update_from_dict(metadata, course, request.user)
return HttpResponse(status=204)
def _serialize_entrance_exam(entrance_exam_module):
"""
Internal helper to convert an entrance exam module/object into JSON
"""
return json.dumps({
'locator': unicode(entrance_exam_module.location)
})
|
agpl-3.0
| 6,055,174,946,302,910,000 | 2,076,344,890,735,272,700 | 36.022321 | 100 | 0.689256 | false |
amenonsen/ansible
|
lib/ansible/modules/network/aci/aci_contract_subject_to_filter.py
|
26
|
9088
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_contract_subject_to_filter
short_description: Bind Contract Subjects to Filters (vz:RsSubjFiltAtt)
description:
- Bind Contract Subjects to Filters on Cisco ACI fabrics.
version_added: '2.4'
options:
contract:
description:
- The name of the contract.
type: str
aliases: [ contract_name ]
filter:
description:
- The name of the Filter to bind to the Subject.
type: str
aliases: [ filter_name ]
log:
description:
- Determines if the binding should be set to log.
- The APIC defaults to C(none) when unset during creation.
type: str
choices: [ log, none ]
aliases: [ directive ]
subject:
description:
- The name of the Contract Subject.
type: str
aliases: [ contract_subject, subject_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the tenant.
type: str
required: yes
aliases: [ tenant_name ]
extends_documentation_fragment: aci
notes:
- The C(tenant), C(contract), C(subject), and C(filter_name) must exist before using this module in your playbook.
The M(aci_tenant), M(aci_contract), M(aci_contract_subject), and M(aci_filter) modules can be used for these.
seealso:
- module: aci_contract_subject
- module: aci_filter
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(vz:RsSubjFiltAtt).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
EXAMPLES = r'''
- name: Add a new contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: present
delegate_to: localhost
- name: Remove an existing contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: absent
delegate_to: localhost
- name: Query a specific contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
state: query
delegate_to: localhost
register: query_result
- name: Query all contract subject to filter bindings
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
filter=dict(type='str', aliases=['filter_name']), # Not required for querying all objects
subject=dict(type='str', aliases=['contract_subject', 'subject_name']), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
log=dict(type='str', choices=['log', 'none'], aliases=['directive']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'filter', 'subject', 'tenant']],
['state', 'present', ['contract', 'filter', 'subject', 'tenant']],
],
)
contract = module.params['contract']
filter_name = module.params['filter']
log = module.params['log']
subject = module.params['subject']
tenant = module.params['tenant']
state = module.params['state']
# Add subject_filter key to module.params for building the URL
module.params['subject_filter'] = filter_name
# Convert log to an empty string if 'none', as that is what the API expects. An empty string is not a user-friendly value to expose to the user.
if log == 'none':
log = ''
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
module_object=contract,
target_filter={'name': contract},
),
subclass_2=dict(
aci_class='vzSubj',
aci_rn='subj-{0}'.format(subject),
module_object=subject,
target_filter={'name': subject},
),
subclass_3=dict(
aci_class='vzRsSubjFiltAtt',
aci_rn='rssubjFiltAtt-{0}'.format(filter_name),
module_object=filter_name,
target_filter={'tnVzFilterName': filter_name},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzRsSubjFiltAtt',
class_config=dict(
tnVzFilterName=filter_name,
directives=log,
),
)
aci.get_diff(aci_class='vzRsSubjFiltAtt')
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Remove subject_filter used to build URL from module.params
module.params.pop('subject_filter')
aci.exit_json()
if __name__ == "__main__":
main()
|
gpl-3.0
| 131,075,660,908,415,220 | -7,803,309,092,659,697,000 | 27.489028 | 141 | 0.607614 | false |
mollstam/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/auth_tests/test_hashers.py
|
12
|
14727
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH,
BasePasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher,
check_password, get_hasher, identify_hasher, is_password_usable,
make_password,
)
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils import six
try:
import crypt
except ImportError:
crypt = None
try:
import bcrypt
except ImportError:
bcrypt = None
class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
iterations = 1
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPass(SimpleTestCase):
def test_simple(self):
encoded = make_password('lètmein')
self.assertTrue(encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
# Blank passwords
blank_encoded = make_password('')
self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_pbkdf2(self):
encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256')
self.assertEqual(encoded,
'pbkdf2_sha256$20000$seasalt$oBSd886ysm3AqYun62DOdin8YcfbU1z9cksZSuLP9r0=')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'pbkdf2_sha256')
self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_sha1(self):
encoded = make_password('lètmein', 'seasalt', 'sha1')
self.assertEqual(encoded,
'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "sha1")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'sha1')
self.assertTrue(blank_encoded.startswith('sha1$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_md5(self):
encoded = make_password('lètmein', 'seasalt', 'md5')
self.assertEqual(encoded,
'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "md5")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'md5')
self.assertTrue(blank_encoded.startswith('md5$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_unsalted_md5(self):
encoded = make_password('lètmein', '', 'unsalted_md5')
self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
# Alternate unsalted syntax
alt_encoded = "md5$$%s" % encoded
self.assertTrue(is_password_usable(alt_encoded))
self.assertTrue(check_password('lètmein', alt_encoded))
self.assertFalse(check_password('lètmeinz', alt_encoded))
# Blank passwords
blank_encoded = make_password('', '', 'unsalted_md5')
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_unsalted_sha1(self):
encoded = make_password('lètmein', '', 'unsalted_sha1')
self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
# Raw SHA1 isn't acceptable
alt_encoded = encoded[6:]
self.assertFalse(check_password('lètmein', alt_encoded))
# Blank passwords
blank_encoded = make_password('', '', 'unsalted_sha1')
self.assertTrue(blank_encoded.startswith('sha1$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(crypt, "no crypt module to generate password.")
def test_crypt(self):
encoded = make_password('lètmei', 'ab', 'crypt')
self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmei', encoded))
self.assertFalse(check_password('lètmeiz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
# Blank passwords
blank_encoded = make_password('', 'ab', 'crypt')
self.assertTrue(blank_encoded.startswith('crypt$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_sha256(self):
encoded = make_password('lètmein', hasher='bcrypt_sha256')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('bcrypt_sha256$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")
# Verify that password truncation no longer works
password = ('VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5'
'JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN')
encoded = make_password(password, hasher='bcrypt_sha256')
self.assertTrue(check_password(password, encoded))
self.assertFalse(check_password(password[:72], encoded))
# Blank passwords
blank_encoded = make_password('', hasher='bcrypt_sha256')
self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt(self):
encoded = make_password('lètmein', hasher='bcrypt')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('bcrypt$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
# Blank passwords
blank_encoded = make_password('', hasher='bcrypt')
self.assertTrue(blank_encoded.startswith('bcrypt$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_unusable(self):
encoded = make_password(None)
self.assertEqual(len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH)
self.assertFalse(is_password_usable(encoded))
self.assertFalse(check_password(None, encoded))
self.assertFalse(check_password(encoded, encoded))
self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
self.assertFalse(check_password('', encoded))
self.assertFalse(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertRaises(ValueError, identify_hasher, encoded)
# Assert that the unusable passwords actually contain a random part.
# This might fail one day due to a hash collision.
self.assertNotEqual(encoded, make_password(None), "Random password collision?")
def test_unspecified_password(self):
"""
Makes sure specifying no plain password with a valid encoded password
returns `False`.
"""
self.assertFalse(check_password(None, make_password('lètmein')))
def test_bad_algorithm(self):
with self.assertRaises(ValueError):
make_password('lètmein', hasher='lolcat')
self.assertRaises(ValueError, identify_hasher, "lolcat$salt$hash")
def test_bad_encoded(self):
self.assertFalse(is_password_usable('lètmein_badencoded'))
self.assertFalse(is_password_usable(''))
def test_low_level_pbkdf2(self):
hasher = PBKDF2PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded,
'pbkdf2_sha256$20000$seasalt2$Flpve/uAcyo6+IFI6YAhjeABGPVbRQjzHDxRhqxewgw=')
self.assertTrue(hasher.verify('lètmein', encoded))
def test_low_level_pbkdf2_sha1(self):
hasher = PBKDF2SHA1PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded,
'pbkdf2_sha1$20000$seasalt2$pJt86NmjAweBY1StBvxCu7l1o9o=')
self.assertTrue(hasher.verify('lètmein', encoded))
def test_upgrade(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
for algo in ('sha1', 'md5'):
encoded = make_password('lètmein', hasher=algo)
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
self.assertTrue(check_password('lètmein', encoded, setter))
self.assertTrue(state['upgraded'])
def test_no_upgrade(self):
encoded = make_password('lètmein')
state = {'upgraded': False}
def setter():
state['upgraded'] = True
self.assertFalse(check_password('WRONG', encoded, setter))
self.assertFalse(state['upgraded'])
def test_no_upgrade_on_incorrect_pass(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
for algo in ('sha1', 'md5'):
encoded = make_password('lètmein', hasher=algo)
state = {'upgraded': False}
def setter():
state['upgraded'] = True
self.assertFalse(check_password('WRONG', encoded, setter))
self.assertFalse(state['upgraded'])
def test_pbkdf2_upgrade(self):
hasher = get_hasher('default')
self.assertEqual('pbkdf2_sha256', hasher.algorithm)
self.assertNotEqual(hasher.iterations, 1)
old_iterations = hasher.iterations
try:
# Generate a password with 1 iteration.
hasher.iterations = 1
encoded = make_password('letmein')
algo, iterations, salt, hash = encoded.split('$', 3)
self.assertEqual(iterations, '1')
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
# Check that no upgrade is triggered
self.assertTrue(check_password('letmein', encoded, setter))
self.assertFalse(state['upgraded'])
# Revert to the old iteration count and ...
hasher.iterations = old_iterations
# ... check if the password would get updated to the new iteration count.
self.assertTrue(check_password('letmein', encoded, setter))
self.assertTrue(state['upgraded'])
finally:
hasher.iterations = old_iterations
def test_pbkdf2_upgrade_new_hasher(self):
hasher = get_hasher('default')
self.assertEqual('pbkdf2_sha256', hasher.algorithm)
self.assertNotEqual(hasher.iterations, 1)
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
with self.settings(PASSWORD_HASHERS=[
'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
encoded = make_password('letmein')
algo, iterations, salt, hash = encoded.split('$', 3)
self.assertEqual(iterations, '1')
# Check that no upgrade is triggered
self.assertTrue(check_password('letmein', encoded, setter))
self.assertFalse(state['upgraded'])
# Revert to the old iteration count and check if the password would get
# updated to the new iteration count.
with self.settings(PASSWORD_HASHERS=[
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
self.assertTrue(check_password('letmein', encoded, setter))
self.assertTrue(state['upgraded'])
def test_load_library_no_algorithm(self):
with self.assertRaises(ValueError) as e:
BasePasswordHasher()._load_library()
self.assertEqual("Hasher 'BasePasswordHasher' doesn't specify a "
"library attribute", str(e.exception))
def test_load_library_importerror(self):
PlainHasher = type(str('PlainHasher'), (BasePasswordHasher,),
{'algorithm': 'plain', 'library': 'plain'})
# Python 3.3 adds quotes around module name
with six.assertRaisesRegex(self, ValueError,
"Couldn't load 'PlainHasher' algorithm library: No module named '?plain'?"):
PlainHasher()._load_library()
|
mit
| -3,115,346,084,628,662,300 | -7,885,698,630,694,761,000 | 43.905199 | 103 | 0.656429 | false |
PennPanda/xenproject
|
tools/libxl/gentypes.py
|
8
|
27441
|
#!/usr/bin/python
import sys
import re
import idl
def libxl_C_instance_of(ty, instancename):
if isinstance(ty, idl.Aggregate) and ty.typename is None:
if instancename is None:
return libxl_C_type_define(ty)
else:
return libxl_C_type_define(ty) + " " + instancename
s = ""
if isinstance(ty, idl.Array):
s += libxl_C_instance_of(ty.lenvar.type, ty.lenvar.name) + ";\n"
return s + ty.typename + " " + instancename
def libxl_C_type_define(ty, indent = ""):
s = ""
if isinstance(ty, idl.Enumeration):
if ty.typename is None:
s += "enum {\n"
else:
s += "typedef enum %s {\n" % ty.typename
for v in ty.values:
x = "%s = %d" % (v.name, v.value)
x = x.replace("\n", "\n ")
s += " " + x + ",\n"
if ty.typename is None:
s += "}"
else:
s += "} %s" % ty.typename
elif isinstance(ty, idl.Aggregate):
if isinstance(ty, idl.KeyedUnion):
s += libxl_C_instance_of(ty.keyvar.type, ty.keyvar.name) + ";\n"
if ty.typename is None:
s += "%s {\n" % ty.kind
else:
s += "typedef %s %s {\n" % (ty.kind, ty.typename)
for f in ty.fields:
if isinstance(ty, idl.KeyedUnion) and f.type is None: continue
x = libxl_C_instance_of(f.type, f.name)
if f.const:
x = "const " + x
x = x.replace("\n", "\n ")
s += " " + x + ";\n"
if ty.typename is None:
s += "}"
else:
s += "} %s" % ty.typename
else:
raise NotImplementedError("%s" % type(ty))
return s.replace("\n", "\n%s" % indent)
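# For illustration, an idl.Enumeration named "libxl_foo" (the value names and
# numbers below are made up) would render roughly as:
#   typedef enum libxl_foo {
#       LIBXL_FOO_ONE = 1,
#       LIBXL_FOO_TWO = 2,
#   } libxl_foo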
def libxl_C_type_dispose(ty, v, indent = " ", parent = None):
s = ""
if isinstance(ty, idl.KeyedUnion):
if parent is None:
raise Exception("KeyedUnion type must have a parent")
s += "switch (%s) {\n" % (parent + ty.keyvar.name)
for f in ty.fields:
(nparent,fexpr) = ty.member(v, f, parent is None)
s += "case %s:\n" % f.enumname
if f.type is not None:
s += libxl_C_type_dispose(f.type, fexpr, indent + " ", nparent)
s += " break;\n"
s += "}\n"
elif isinstance(ty, idl.Array):
if parent is None:
raise Exception("Array type must have a parent")
if ty.elem_type.dispose_fn is not None:
s += "{\n"
s += " int i;\n"
s += " for (i=0; i<%s; i++)\n" % (parent + ty.lenvar.name)
s += libxl_C_type_dispose(ty.elem_type, v+"[i]",
indent + " ", parent)
if ty.dispose_fn is not None:
if ty.elem_type.dispose_fn is not None:
s += " "
s += "%s(%s);\n" % (ty.dispose_fn, ty.pass_arg(v, parent is None))
if ty.elem_type.dispose_fn is not None:
s += "}\n"
elif isinstance(ty, idl.Struct) and (parent is None or ty.dispose_fn is None):
for f in [f for f in ty.fields if not f.const]:
(nparent,fexpr) = ty.member(v, f, parent is None)
s += libxl_C_type_dispose(f.type, fexpr, "", nparent)
else:
if ty.dispose_fn is not None:
s += "%s(%s);\n" % (ty.dispose_fn, ty.pass_arg(v, parent is None))
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_type_copy(ty, v, w, indent = " ", vparent = None, wparent = None):
s = ""
if vparent is None:
s += "GC_INIT(ctx);\n";
if isinstance(ty, idl.KeyedUnion):
if vparent is None or wparent is None:
raise Exception("KeyedUnion type must have a parent")
s += "%s = %s;\n" % ((vparent + ty.keyvar.name), (wparent + ty.keyvar.name))
s += "switch (%s) {\n" % (wparent + ty.keyvar.name)
for f in ty.fields:
(vnparent,vfexpr) = ty.member(v, f, vparent is None)
(wnparent,wfexpr) = ty.member(w, f, wparent is None)
s += "case %s:\n" % f.enumname
if f.type is not None:
s += libxl_C_type_copy(f.type, vfexpr, wfexpr, indent + " ",
vnparent, wnparent)
s += " break;\n"
s += "}\n"
elif isinstance(ty, idl.Array):
if vparent is None or wparent is None:
raise Exception("Array type must have a parent")
s += "%s = libxl__calloc(NOGC, %s, sizeof(*%s));\n" % (ty.pass_arg(v, vparent is None),
(wparent + ty.lenvar.name),
ty.pass_arg(w, wparent is None))
s += "%s = %s;\n" % ((vparent + ty.lenvar.name), (wparent + ty.lenvar.name))
s += "{\n"
s += " int i;\n"
s += " for (i=0; i<%s; i++)\n" % (wparent + ty.lenvar.name)
s += libxl_C_type_copy(ty.elem_type, v+"[i]", w+"[i]",
indent + " ", vparent, wparent)
s += "}\n"
elif isinstance(ty, idl.Struct) and ((vparent is None and wparent is None) or ty.copy_fn is None):
for f in [f for f in ty.fields if not f.const and not f.type.private]:
(vnparent,vfexpr) = ty.member(v, f, vparent is None)
(wnparent,wfexpr) = ty.member(w, f, wparent is None)
s += libxl_C_type_copy(f.type, vfexpr, wfexpr, "", vnparent, wnparent)
else:
if ty.copy_fn is not None:
s += "%s(ctx, %s, %s);\n" % (ty.copy_fn,
ty.pass_arg(v, vparent is None, passby=idl.PASS_BY_REFERENCE),
ty.pass_arg(w, wparent is None, passby=idl.PASS_BY_REFERENCE))
else:
s += "%s = %s;\n" % (ty.pass_arg(v, vparent is None, passby=idl.PASS_BY_VALUE),
ty.pass_arg(w, wparent is None, passby=idl.PASS_BY_VALUE))
if vparent is None:
s += "GC_FREE;\n"
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_init_members(ty, nesting = 0):
"""Returns a list of members of ty which require a separate init"""
if isinstance(ty, idl.Aggregate):
return [f for f in ty.fields if not f.const and isinstance(f.type,idl.KeyedUnion)]
else:
return []
def _libxl_C_type_init(ty, v, indent = " ", parent = None, subinit=False):
s = ""
if isinstance(ty, idl.KeyedUnion):
if parent is None:
raise Exception("KeyedUnion type must have a parent")
if subinit:
s += "switch (%s) {\n" % (parent + ty.keyvar.name)
for f in ty.fields:
(nparent,fexpr) = ty.member(v, f, parent is None)
s += "case %s:\n" % f.enumname
if f.type is not None:
s += _libxl_C_type_init(f.type, fexpr, " ", nparent)
s += " break;\n"
s += "}\n"
else:
if ty.keyvar.init_val:
s += "%s = %s;\n" % (parent + ty.keyvar.name, ty.keyvar.init_val)
elif ty.keyvar.type.init_val:
s += "%s = %s;\n" % (parent + ty.keyvar.name, ty.keyvar.type.init_val)
elif isinstance(ty, idl.Struct) and (parent is None or ty.init_fn is None):
for f in [f for f in ty.fields if not f.const]:
(nparent,fexpr) = ty.member(v, f, parent is None)
if f.init_val is not None:
s += "%s = %s;\n" % (fexpr, f.init_val)
else:
s += _libxl_C_type_init(f.type, fexpr, "", nparent)
else:
if ty.init_val is not None:
s += "%s = %s;\n" % (ty.pass_arg(v, parent is None), ty.init_val)
elif ty.init_fn is not None:
s += "%s(%s);\n" % (ty.init_fn, ty.pass_arg(v, parent is None))
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_type_init(ty):
s = ""
s += "void %s(%s)\n" % (ty.init_fn, ty.make_arg("p", passby=idl.PASS_BY_REFERENCE))
s += "{\n"
s += " memset(p, '\\0', sizeof(*p));\n"
s += _libxl_C_type_init(ty, "p")
s += "}\n"
s += "\n"
return s
def libxl_C_type_member_init(ty, field):
if not isinstance(field.type, idl.KeyedUnion):
raise Exception("Only KeyedUnion is supported for member init")
ku = field.type
s = ""
s += "void %s(%s, %s)\n" % (ty.init_fn + "_" + ku.keyvar.name,
ty.make_arg("p", passby=idl.PASS_BY_REFERENCE),
ku.keyvar.type.make_arg(ku.keyvar.name))
s += "{\n"
if ku.keyvar.init_val is not None:
init_val = ku.keyvar.init_val
elif ku.keyvar.type.init_val is not None:
init_val = ku.keyvar.type.init_val
else:
init_val = None
(nparent,fexpr) = ty.member(ty.pass_arg("p"), ku.keyvar, isref=True)
if init_val is not None:
s += " assert(%s == %s);\n" % (fexpr, init_val)
else:
s += " assert(!%s);\n" % (fexpr)
s += " %s = %s;\n" % (fexpr, ku.keyvar.name)
(nparent,fexpr) = ty.member(ty.pass_arg("p"), field, isref=True)
s += _libxl_C_type_init(ku, fexpr, parent=nparent, subinit=True)
s += "}\n"
s += "\n"
return s
def libxl_C_type_gen_map_key(f, parent, indent = ""):
s = ""
if isinstance(f.type, idl.KeyedUnion):
s += "switch (%s) {\n" % (parent + f.type.keyvar.name)
for x in f.type.fields:
v = f.type.keyvar.name + "." + x.name
s += "case %s:\n" % x.enumname
s += " s = yajl_gen_string(hand, (const unsigned char *)\"%s\", sizeof(\"%s\")-1);\n" % (v, v)
s += " if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
s += " break;\n"
s += "}\n"
else:
s += "s = yajl_gen_string(hand, (const unsigned char *)\"%s\", sizeof(\"%s\")-1);\n" % (f.name, f.name)
s += "if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def get_init_val(f):
if f.init_val is not None:
return f.init_val
elif f.type.init_val is not None:
return f.type.init_val
return None
def get_default_expr(f, nparent, fexpr):
if isinstance(f.type, idl.Aggregate):
return "1 /* always generate JSON output for aggregate type */"
if isinstance(f.type, idl.Array):
return "%s && %s" % (fexpr, nparent + f.type.lenvar.name)
init_val = get_init_val(f)
if init_val is not None:
return "%s != %s" % (fexpr, init_val)
if f.type.check_default_fn:
return "!%s(&%s)" % (f.type.check_default_fn, fexpr)
return "%s" % fexpr
def libxl_C_type_gen_json(ty, v, indent = " ", parent = None):
s = ""
if parent is None:
s += "yajl_gen_status s;\n"
if isinstance(ty, idl.Array):
if parent is None:
raise Exception("Array type must have a parent")
s += "{\n"
s += " int i;\n"
s += " s = yajl_gen_array_open(hand);\n"
s += " if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
s += " for (i=0; i<%s; i++) {\n" % (parent + ty.lenvar.name)
s += libxl_C_type_gen_json(ty.elem_type, v+"[i]",
indent + " ", parent)
s += " }\n"
s += " s = yajl_gen_array_close(hand);\n"
s += " if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
s += "}\n"
elif isinstance(ty, idl.Enumeration):
s += "s = libxl__yajl_gen_enum(hand, %s_to_string(%s));\n" % (ty.typename, ty.pass_arg(v, parent is None))
s += "if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
elif isinstance(ty, idl.KeyedUnion):
if parent is None:
raise Exception("KeyedUnion type must have a parent")
s += "switch (%s) {\n" % (parent + ty.keyvar.name)
for f in ty.fields:
(nparent,fexpr) = ty.member(v, f, parent is None)
s += "case %s:\n" % f.enumname
if f.type is not None:
s += libxl_C_type_gen_json(f.type, fexpr, indent + " ", nparent)
else:
s += " s = yajl_gen_map_open(hand);\n"
s += " if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
s += " s = yajl_gen_map_close(hand);\n"
s += " if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
s += " break;\n"
s += "}\n"
elif isinstance(ty, idl.Struct) and (parent is None or ty.json_gen_fn is None):
s += "s = yajl_gen_map_open(hand);\n"
s += "if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
for f in [f for f in ty.fields if not f.const and not f.type.private]:
(nparent,fexpr) = ty.member(v, f, parent is None)
default_expr = get_default_expr(f, nparent, fexpr)
s += "if (%s) {\n" % default_expr
s += libxl_C_type_gen_map_key(f, nparent, " ")
s += libxl_C_type_gen_json(f.type, fexpr, " ", nparent)
s += "}\n"
s += "s = yajl_gen_map_close(hand);\n"
s += "if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
else:
if ty.json_gen_fn is not None:
s += "s = %s(hand, %s);\n" % (ty.json_gen_fn, ty.pass_arg(v, parent is None))
s += "if (s != yajl_gen_status_ok)\n"
s += " goto out;\n"
if parent is None:
s += "out:\n"
s += "return s;\n"
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_type_to_json(ty, v, indent = " "):
s = ""
gen = "(libxl__gen_json_callback)&%s_gen_json" % ty.typename
s += "return libxl__object_to_json(ctx, \"%s\", %s, (void *)%s);\n" % (ty.typename, gen, ty.pass_arg(v, passby=idl.PASS_BY_REFERENCE))
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_type_parse_json(ty, w, v, indent = " ", parent = None, discriminator = None):
s = ""
if parent is None:
s += "int rc = 0;\n"
s += "const libxl__json_object *x = o;\n"
if isinstance(ty, idl.Array):
if parent is None:
raise Exception("Array type must have a parent")
if discriminator is not None:
raise Exception("Only KeyedUnion can have discriminator")
lenvar = parent + ty.lenvar.name
s += "{\n"
s += " libxl__json_object *t;\n"
s += " int i;\n"
s += " if (!libxl__json_object_is_array(x)) {\n"
s += " rc = -1;\n"
s += " goto out;\n"
s += " }\n"
s += " %s = x->u.array->count;\n" % lenvar
s += " %s = libxl__calloc(NOGC, %s, sizeof(*%s));\n" % (v, lenvar, v)
s += " if (!%s && %s != 0) {\n" % (v, lenvar)
s += " rc = -1;\n"
s += " goto out;\n"
s += " }\n"
s += " for (i=0; (t=libxl__json_array_get(x,i)); i++) {\n"
s += libxl_C_type_parse_json(ty.elem_type, "t", v+"[i]",
indent + " ", parent)
s += " }\n"
s += " if (i != %s) {\n" % lenvar
s += " rc = -1;\n"
s += " goto out;\n"
s += " }\n"
s += "}\n"
elif isinstance(ty, idl.Enumeration):
if discriminator is not None:
raise Exception("Only KeyedUnion can have discriminator")
s += "{\n"
s += " const char *enum_str;\n"
s += " if (!libxl__json_object_is_string(x)) {\n"
s += " rc = -1;\n"
s += " goto out;\n"
s += " }\n"
s += " enum_str = libxl__json_object_get_string(x);\n"
s += " rc = %s_from_string(enum_str, %s);\n" % (ty.typename, ty.pass_arg(v, parent is None, idl.PASS_BY_REFERENCE))
s += " if (rc)\n"
s += " goto out;\n"
s += "}\n"
elif isinstance(ty, idl.KeyedUnion):
if parent is None:
raise Exception("KeyedUnion type must have a parent")
if discriminator is None:
raise Exception("KeyedUnion type must have a discriminator")
for f in ty.fields:
if f.enumname != discriminator:
continue
(nparent,fexpr) = ty.member(v, f, parent is None)
if f.type is not None:
s += libxl_C_type_parse_json(f.type, w, fexpr, indent + " ", nparent)
elif isinstance(ty, idl.Struct) and (parent is None or ty.json_parse_fn is None):
if discriminator is not None:
raise Exception("Only KeyedUnion can have discriminator")
for f in [f for f in ty.fields if not f.const and not f.type.private]:
saved_var_name = "saved_%s" % f.name
s += "{\n"
s += " const libxl__json_object *%s = NULL;\n" % saved_var_name
s += " %s = x;\n" % saved_var_name
if isinstance(f.type, idl.KeyedUnion):
for x in f.type.fields:
s += " x = libxl__json_map_get(\"%s\", %s, JSON_MAP);\n" % \
(f.type.keyvar.name + "." + x.name, w)
s += " if (x) {\n"
(nparent, fexpr) = ty.member(v, f.type.keyvar, parent is None)
s += " %s_init_%s(%s, %s);\n" % (ty.typename, f.type.keyvar.name, v, x.enumname)
(nparent,fexpr) = ty.member(v, f, parent is None)
s += libxl_C_type_parse_json(f.type, "x", fexpr, " ", nparent, x.enumname)
s += " }\n"
else:
s += " x = libxl__json_map_get(\"%s\", %s, %s);\n" % (f.name, w, f.type.json_parse_type)
s += " if (x) {\n"
(nparent,fexpr) = ty.member(v, f, parent is None)
s += libxl_C_type_parse_json(f.type, "x", fexpr, " ", nparent)
s += " }\n"
s += " x = %s;\n" % saved_var_name
s += "}\n"
else:
if discriminator is not None:
raise Exception("Only KeyedUnion can have discriminator")
if ty.json_parse_fn is not None:
s += "rc = %s(gc, %s, &%s);\n" % (ty.json_parse_fn, w, v)
s += "if (rc)\n"
s += " goto out;\n"
if parent is None:
s += "out:\n"
s += "return rc;\n"
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_type_from_json(ty, v, w, indent = " "):
s = ""
parse = "(libxl__json_parse_callback)&%s_parse_json" % (ty.namespace + "_" + ty.rawname)
s += "return libxl__object_from_json(ctx, \"%s\", %s, %s, %s);\n" % (ty.typename, parse, v, w)
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_enum_to_string(ty, e, indent = " "):
s = ""
s += "switch(%s) {\n" % e
for v in ty.values:
s += " case %s:\n" % (v.name)
s += " return \"%s\";\n" % (v.valuename.lower())
s += " default:\n "
s += " return NULL;\n"
s += "}\n"
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_enum_strings(ty, indent=""):
s = ""
s += "libxl_enum_string_table %s_string_table[] = {\n" % (ty.typename)
for v in ty.values:
s += " { .s = \"%s\", .v = %s },\n" % (v.valuename.lower(), v.name)
s += " { NULL, -1 },\n"
s += "};\n"
s += "\n"
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
def libxl_C_enum_from_string(ty, str, e, indent = " "):
s = ""
s += "return libxl__enum_from_string(%s_string_table,\n" % ty.typename
s += " %s, (int *)%s);\n" % (str, e)
if s != "":
s = indent + s
return s.replace("\n", "\n%s" % indent).rstrip(indent)
if __name__ == '__main__':
if len(sys.argv) != 6:
print >>sys.stderr, "Usage: gentypes.py <idl> <header> <header-private> <header-json> <implementation>"
sys.exit(1)
(_, idlname, header, header_private, header_json, impl) = sys.argv
(builtins,types) = idl.parse(idlname)
print "outputting libxl type definitions to %s" % header
f = open(header, "w")
header_define = header.upper().replace('.','_')
f.write("""#ifndef %s
#define %s
/*
* DO NOT EDIT.
*
* This file is autogenerated by
* "%s"
*/
""" % (header_define, header_define, " ".join(sys.argv)))
for ty in types:
f.write(libxl_C_type_define(ty) + ";\n")
if ty.dispose_fn is not None:
f.write("%svoid %s(%s);\n" % (ty.hidden(), ty.dispose_fn, ty.make_arg("p")))
if ty.copy_fn is not None:
f.write("%svoid %s(libxl_ctx *ctx, %s, %s);\n" % (ty.hidden(), ty.copy_fn,
ty.make_arg("dst"), ty.make_arg("src")))
if ty.init_fn is not None:
f.write("%svoid %s(%s);\n" % (ty.hidden(), ty.init_fn, ty.make_arg("p")))
for field in libxl_init_members(ty):
if not isinstance(field.type, idl.KeyedUnion):
raise Exception("Only KeyedUnion is supported for member init")
ku = field.type
f.write("%svoid %s(%s, %s);\n" % (ty.hidden(), ty.init_fn + "_" + ku.keyvar.name,
ty.make_arg("p"),
ku.keyvar.type.make_arg(ku.keyvar.name)))
if ty.json_gen_fn is not None:
f.write("%schar *%s_to_json(libxl_ctx *ctx, %s);\n" % (ty.hidden(), ty.typename, ty.make_arg("p")))
if ty.json_parse_fn is not None:
f.write("%sint %s_from_json(libxl_ctx *ctx, %s, const char *s);\n" % (ty.hidden(), ty.typename, ty.make_arg("p", passby=idl.PASS_BY_REFERENCE)))
if isinstance(ty, idl.Enumeration):
f.write("%sconst char *%s_to_string(%s);\n" % (ty.hidden(), ty.typename, ty.make_arg("p")))
f.write("%sint %s_from_string(const char *s, %s);\n" % (ty.hidden(), ty.typename, ty.make_arg("e", passby=idl.PASS_BY_REFERENCE)))
f.write("%sextern libxl_enum_string_table %s_string_table[];\n" % (ty.hidden(), ty.typename))
f.write("\n")
f.write("""#endif /* %s */\n""" % (header_define))
f.close()
print "outputting libxl JSON definitions to %s" % header_json
f = open(header_json, "w")
header_json_define = header_json.upper().replace('.','_')
f.write("""#ifndef %s
#define %s
/*
* DO NOT EDIT.
*
* This file is autogenerated by
* "%s"
*/
""" % (header_json_define, header_json_define, " ".join(sys.argv)))
for ty in [ty for ty in types if ty.json_gen_fn is not None]:
f.write("%syajl_gen_status %s_gen_json(yajl_gen hand, %s);\n" % (ty.hidden(), ty.typename, ty.make_arg("p", passby=idl.PASS_BY_REFERENCE)))
f.write("\n")
f.write("""#endif /* %s */\n""" % header_json_define)
f.close()
print "outputting libxl type internal definitions to %s" % header_private
f = open(header_private, "w")
header_private_define = header_private.upper().replace('.','_')
f.write("""#ifndef %s
#define %s
/*
* DO NOT EDIT.
*
* This file is autogenerated by
* "%s"
*/
""" % (header_private_define, header_private_define, " ".join(sys.argv)))
for ty in [ty for ty in types if ty.json_parse_fn is not None]:
f.write("%sint %s_parse_json(libxl__gc *gc, const libxl__json_object *o, %s);\n" % \
(ty.hidden(), ty.namespace + "_" + ty.rawname,
ty.make_arg("p", passby=idl.PASS_BY_REFERENCE)))
f.write("\n")
f.write("""#endif /* %s */\n""" % header_json_define)
f.close()
print "outputting libxl type implementations to %s" % impl
f = open(impl, "w")
f.write("""
/* DO NOT EDIT.
*
* This file is autogenerated by
* "%s"
*/
#include "libxl_osdeps.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "libxl_internal.h"
#define LIBXL_DTOR_POISON 0xa5
""" % " ".join(sys.argv))
for ty in [t for t in types if t.dispose_fn is not None and t.autogenerate_dispose_fn]:
f.write("void %s(%s)\n" % (ty.dispose_fn, ty.make_arg("p")))
f.write("{\n")
f.write(libxl_C_type_dispose(ty, "p"))
f.write(" memset(p, LIBXL_DTOR_POISON, sizeof(*p));\n")
f.write("}\n")
f.write("\n")
for ty in [t for t in types if t.copy_fn and t.autogenerate_copy_fn]:
f.write("void %s(libxl_ctx *ctx, %s, %s)\n" % (ty.copy_fn,
ty.make_arg("dst", passby=idl.PASS_BY_REFERENCE),
ty.make_arg("src", passby=idl.PASS_BY_REFERENCE)))
f.write("{\n")
f.write(libxl_C_type_copy(ty, "dst", "src"))
f.write("}\n")
f.write("\n")
for ty in [t for t in types if t.init_fn is not None and t.autogenerate_init_fn]:
f.write(libxl_C_type_init(ty))
for field in libxl_init_members(ty):
f.write(libxl_C_type_member_init(ty, field))
for ty in [t for t in types if isinstance(t,idl.Enumeration)]:
f.write("const char *%s_to_string(%s e)\n" % (ty.typename, ty.typename))
f.write("{\n")
f.write(libxl_C_enum_to_string(ty, "e"))
f.write("}\n")
f.write("\n")
f.write(libxl_C_enum_strings(ty))
f.write("int %s_from_string(const char *s, %s *e)\n" % (ty.typename, ty.typename))
f.write("{\n")
f.write(libxl_C_enum_from_string(ty, "s", "e"))
f.write("}\n")
f.write("\n")
for ty in [t for t in types if t.json_gen_fn is not None]:
f.write("yajl_gen_status %s_gen_json(yajl_gen hand, %s)\n" % (ty.typename, ty.make_arg("p", passby=idl.PASS_BY_REFERENCE)))
f.write("{\n")
f.write(libxl_C_type_gen_json(ty, "p"))
f.write("}\n")
f.write("\n")
f.write("char *%s_to_json(libxl_ctx *ctx, %s)\n" % (ty.typename, ty.make_arg("p")))
f.write("{\n")
f.write(libxl_C_type_to_json(ty, "p"))
f.write("}\n")
f.write("\n")
for ty in [t for t in types if t.json_parse_fn is not None]:
f.write("int %s_parse_json(libxl__gc *gc, const libxl__json_object *%s, %s)\n" % \
(ty.namespace + "_" + ty.rawname,"o",ty.make_arg("p", passby=idl.PASS_BY_REFERENCE)))
f.write("{\n")
f.write(libxl_C_type_parse_json(ty, "o", "p"))
f.write("}\n")
f.write("\n")
f.write("int %s_from_json(libxl_ctx *ctx, %s, const char *s)\n" % (ty.typename, ty.make_arg("p", passby=idl.PASS_BY_REFERENCE)))
f.write("{\n")
if not isinstance(ty, idl.Enumeration):
f.write(" %s_init(p);\n" % ty.typename)
f.write(libxl_C_type_from_json(ty, "p", "s"))
f.write("}\n")
f.write("\n")
f.close()
|
gpl-2.0
| -8,458,651,872,971,083,000 | 1,481,200,631,810,910,700 | 37.649296 | 156 | 0.486535 | false |
normanyahq/Parameterized-Remote-Shell-Execution-Service
|
server.py
|
1
|
1415
|
from flask import Flask, request
from subprocess import Popen, PIPE
import json
app = Flask(__name__)
HelpMessage = """
Usage:
    POST a command to this URL with the following JSON payload:
    {"file": "...", "args": [...]}
    We use this format to keep it consistent with Node.js spawnSync.
    Example:
        {"file": "ls", "args": ["-l", "-a"]}
Test with curl:
curl -X POST -H "Content-type: application/json" --data '{"file": "ls", "args":["-a", "-l"]}' localhost:41414
"""
@app.route("/", methods=["POST", "GET"])
def commandExecutor():
if request.method == "GET":
return HelpMessage
elif request.method == "POST":
commandObject = request.get_json()
print ('Command Object: {}'.format(commandObject))
process = Popen([commandObject["file"]] + commandObject["args"],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE)
(stdout, stderr) = process.communicate(input=commandObject.get("input", "").encode('utf-8'))
result = json.dumps({ "stdout": stdout,
"stderr": stderr,
"exit_code": process.returncode,
"error": process.returncode!=0})
print ("\tstdout: {}".format(stdout))
if stderr:
print ("\tstderr: {}".format(stderr))
print ("\tresult: {}".format(result))
return result
|
mit
| -4,344,755,810,036,026,400 | -7,063,194,369,836,066,000 | 36.236842 | 113 | 0.54629 | false |
vijayendrabvs/ssl-neutron
|
neutron/plugins/openvswitch/ovs_models_v2.py
|
7
|
3864
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.schema import UniqueConstraint
from neutron.db import models_v2
from neutron.db.models_v2 import model_base
from sqlalchemy import orm
class VlanAllocation(model_base.BASEV2):
"""Represents allocation state of vlan_id on physical network."""
__tablename__ = 'ovs_vlan_allocations'
physical_network = Column(String(64), nullable=False, primary_key=True)
vlan_id = Column(Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = Column(Boolean, nullable=False)
def __init__(self, physical_network, vlan_id):
self.physical_network = physical_network
self.vlan_id = vlan_id
self.allocated = False
def __repr__(self):
return "<VlanAllocation(%s,%d,%s)>" % (self.physical_network,
self.vlan_id, self.allocated)
class TunnelAllocation(model_base.BASEV2):
"""Represents allocation state of tunnel_id."""
__tablename__ = 'ovs_tunnel_allocations'
tunnel_id = Column(Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = Column(Boolean, nullable=False)
def __init__(self, tunnel_id):
self.tunnel_id = tunnel_id
self.allocated = False
def __repr__(self):
return "<TunnelAllocation(%d,%s)>" % (self.tunnel_id, self.allocated)
class NetworkBinding(model_base.BASEV2):
"""Represents binding of virtual network to physical realization."""
__tablename__ = 'ovs_network_bindings'
network_id = Column(String(36),
ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
# 'gre', 'vlan', 'flat', 'local'
network_type = Column(String(32), nullable=False)
physical_network = Column(String(64))
segmentation_id = Column(Integer) # tunnel_id or vlan_id
network = orm.relationship(
models_v2.Network,
backref=orm.backref("binding", lazy='joined',
uselist=False, cascade='delete'))
def __init__(self, network_id, network_type, physical_network,
segmentation_id):
self.network_id = network_id
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
def __repr__(self):
return "<NetworkBinding(%s,%s,%s,%d)>" % (self.network_id,
self.network_type,
self.physical_network,
self.segmentation_id)
class TunnelEndpoint(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ovs_tunnel_endpoints'
__table_args__ = (
UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'),
)
ip_address = Column(String(64), primary_key=True)
id = Column(Integer, nullable=False)
def __init__(self, ip_address, id):
self.ip_address = ip_address
self.id = id
def __repr__(self):
return "<TunnelEndpoint(%s,%s)>" % (self.ip_address, self.id)
|
apache-2.0
| 7,972,159,455,215,564,000 | -3,273,527,571,174,622,700 | 35.45283 | 78 | 0.621377 | false |